LD1R and LD2R variants of the c2 microkernel

- Instead of 1 LD1 and 4 DUP instructions, use 4 LD1R or 2 LD2R instructions

PiperOrigin-RevId: 410613731
diff --git a/BUILD.bazel b/BUILD.bazel
index 0949bf2..0d1f990 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -2479,6 +2479,8 @@
     "src/qc8-dwconv/gen/up32x9-minmax-fp32-neon-mul16.c",
     "src/qc8-dwconv/gen/up32x25-minmax-fp32-neon-mul16.c",
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qc8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-dup.c",
@@ -2486,6 +2488,8 @@
     "src/qc8-gemm/gen/1x8c8-minmax-fp32-neon-mlal.c",
     "src/qc8-gemm/gen/1x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qc8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-dup.c",
@@ -2493,6 +2497,8 @@
     "src/qc8-gemm/gen/2x8c8-minmax-fp32-neon-mlal.c",
     "src/qc8-gemm/gen/4x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-dup.c",
@@ -2500,6 +2506,8 @@
     "src/qc8-igemm/gen/1x8c8-minmax-fp32-neon-mlal.c",
     "src/qc8-igemm/gen/1x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-dup.c",
@@ -2554,11 +2562,17 @@
     "src/qs8-gemm/gen/1x8-minmax-gemmlowp-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/1x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c",
     "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qs8-gemm/gen/1x8c2s4-minmax-rndnu-neon-mlal.c",
@@ -2580,8 +2594,12 @@
     "src/qs8-gemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qs8-gemm/gen/1x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2595,11 +2613,17 @@
     "src/qs8-gemm/gen/2x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-gemm/gen/2x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c",
     "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qs8-gemm/gen/2x8c2s4-minmax-rndnu-neon-mlal.c",
@@ -2618,8 +2642,12 @@
     "src/qs8-gemm/gen/2x16-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-gemm/gen/2x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2633,8 +2661,12 @@
     "src/qs8-gemm/gen/3x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-gemm/gen/3x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mull.c",
@@ -2648,8 +2680,12 @@
     "src/qs8-gemm/gen/3x16-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-gemm/gen/3x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2663,8 +2699,12 @@
     "src/qs8-gemm/gen/4x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-gemm/gen/4x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mull.c",
@@ -2681,8 +2721,12 @@
     "src/qs8-gemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qs8-gemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2699,11 +2743,17 @@
     "src/qs8-igemm/gen/1x8-minmax-gemmlowp-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/1x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c",
     "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qs8-igemm/gen/1x8c2s4-minmax-rndnu-neon-mlal.c",
@@ -2725,8 +2775,12 @@
     "src/qs8-igemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qs8-igemm/gen/1x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2740,11 +2794,17 @@
     "src/qs8-igemm/gen/2x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-igemm/gen/2x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c",
     "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c",
     "src/qs8-igemm/gen/2x8c2s4-minmax-rndnu-neon-mlal.c",
@@ -2763,8 +2823,12 @@
     "src/qs8-igemm/gen/2x16-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-igemm/gen/2x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2778,8 +2842,12 @@
     "src/qs8-igemm/gen/3x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-igemm/gen/3x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mull.c",
@@ -2793,8 +2861,12 @@
     "src/qs8-igemm/gen/3x16-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-igemm/gen/3x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mull.c",
@@ -2808,8 +2880,12 @@
     "src/qs8-igemm/gen/4x8-minmax-gemmlowp-neon-mlal-lane.c",
     "src/qs8-igemm/gen/4x8-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mull.c",
@@ -2826,8 +2902,12 @@
     "src/qs8-igemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c",
     "src/qs8-igemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c",
     "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c",
     "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c",
     "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c",
     "src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c",
     "src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mlal.c",
     "src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mull.c",
@@ -3395,6 +3475,8 @@
     "src/qc8-dwconv/gen/up32x9-minmax-fp32-neonv8-mul16.c",
     "src/qc8-dwconv/gen/up32x25-minmax-fp32-neonv8-mul16.c",
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qc8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3402,6 +3484,8 @@
     "src/qc8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qc8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qc8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3409,6 +3493,8 @@
     "src/qc8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qc8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3416,6 +3502,8 @@
     "src/qc8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qc8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3431,6 +3519,8 @@
     "src/qs8-dwconv/gen/up32x9-minmax-fp32-neonv8-mul16.c",
     "src/qs8-dwconv/gen/up32x25-minmax-fp32-neonv8-mul16.c",
     "src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qs8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3438,6 +3528,8 @@
     "src/qs8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qs8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qs8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3445,6 +3537,8 @@
     "src/qs8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qs8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c",
@@ -3452,6 +3546,8 @@
     "src/qs8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c",
     "src/qs8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c",
     "src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c",
     "src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c",
     "src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 809f82e..0882f50 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1508,6 +1508,8 @@
   src/qc8-dwconv/gen/up32x9-minmax-fp32-neon-mul16.c
   src/qc8-dwconv/gen/up32x25-minmax-fp32-neon-mul16.c
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c
   src/qc8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-dup.c
@@ -1515,6 +1517,8 @@
   src/qc8-gemm/gen/1x8c8-minmax-fp32-neon-mlal.c
   src/qc8-gemm/gen/1x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c
   src/qc8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-dup.c
@@ -1522,6 +1526,8 @@
   src/qc8-gemm/gen/2x8c8-minmax-fp32-neon-mlal.c
   src/qc8-gemm/gen/4x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c
   src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-dup.c
@@ -1529,6 +1535,8 @@
   src/qc8-igemm/gen/1x8c8-minmax-fp32-neon-mlal.c
   src/qc8-igemm/gen/1x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c
   src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-dup.c
@@ -1583,11 +1591,17 @@
   src/qs8-gemm/gen/1x8-minmax-gemmlowp-neon-mull-addw-dup.c
   src/qs8-gemm/gen/1x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
   src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c
   src/qs8-gemm/gen/1x8c2s4-minmax-rndnu-neon-mlal.c
@@ -1609,8 +1623,12 @@
   src/qs8-gemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c
   src/qs8-gemm/gen/1x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mull.c
@@ -1624,11 +1642,17 @@
   src/qs8-gemm/gen/2x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-gemm/gen/2x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
   src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c
   src/qs8-gemm/gen/2x8c2s4-minmax-rndnu-neon-mlal.c
@@ -1647,8 +1671,12 @@
   src/qs8-gemm/gen/2x16-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-gemm/gen/2x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mull.c
@@ -1662,8 +1690,12 @@
   src/qs8-gemm/gen/3x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-gemm/gen/3x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mull.c
@@ -1677,8 +1709,12 @@
   src/qs8-gemm/gen/3x16-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-gemm/gen/3x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mull.c
@@ -1692,8 +1728,12 @@
   src/qs8-gemm/gen/4x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-gemm/gen/4x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mull.c
@@ -1710,8 +1750,12 @@
   src/qs8-gemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c
   src/qs8-gemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mull.c
@@ -1728,11 +1772,17 @@
   src/qs8-igemm/gen/1x8-minmax-gemmlowp-neon-mull-addw-dup.c
   src/qs8-igemm/gen/1x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
   src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal.c
   src/qs8-igemm/gen/1x8c2s4-minmax-rndnu-neon-mlal.c
@@ -1754,8 +1804,12 @@
   src/qs8-igemm/gen/1x16-minmax-rndnu-neon-mlal-lane.c
   src/qs8-igemm/gen/1x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mull.c
@@ -1769,11 +1823,17 @@
   src/qs8-igemm/gen/2x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-igemm/gen/2x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
   src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
   src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
   src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal.c
   src/qs8-igemm/gen/2x8c2s4-minmax-rndnu-neon-mlal.c
@@ -1792,8 +1852,12 @@
   src/qs8-igemm/gen/2x16-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-igemm/gen/2x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mull.c
@@ -1807,8 +1871,12 @@
   src/qs8-igemm/gen/3x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-igemm/gen/3x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mull.c
@@ -1822,8 +1890,12 @@
   src/qs8-igemm/gen/3x16-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-igemm/gen/3x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mull.c
@@ -1837,8 +1909,12 @@
   src/qs8-igemm/gen/4x8-minmax-gemmlowp-neon-mlal-lane.c
   src/qs8-igemm/gen/4x8-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mull.c
@@ -1855,8 +1931,12 @@
   src/qs8-igemm/gen/4x16-minmax-rndnu-neon-mlal-lane.c
   src/qs8-igemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c
   src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
+  src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
+  src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
   src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
   src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c
+  src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
+  src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
   src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c
   src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mlal.c
   src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mull.c
@@ -2416,6 +2496,8 @@
   src/qc8-dwconv/gen/up32x9-minmax-fp32-neonv8-mul16.c
   src/qc8-dwconv/gen/up32x25-minmax-fp32-neonv8-mul16.c
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qc8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2423,6 +2505,8 @@
   src/qc8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c
   src/qc8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qc8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2430,6 +2514,8 @@
   src/qc8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal.c
   src/qc8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2437,6 +2523,8 @@
   src/qc8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c
   src/qc8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2452,6 +2540,8 @@
   src/qs8-dwconv/gen/up32x9-minmax-fp32-neonv8-mul16.c
   src/qs8-dwconv/gen/up32x25-minmax-fp32-neonv8-mul16.c
   src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qs8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2459,6 +2549,8 @@
   src/qs8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c
   src/qs8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qs8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2466,6 +2558,8 @@
   src/qs8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal.c
   src/qs8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-dup.c
@@ -2473,6 +2567,8 @@
   src/qs8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal.c
   src/qs8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
+  src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
   src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
   src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal.c
   src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-dup.c
diff --git a/bench/qs8-gemm-e2e.cc b/bench/qs8-gemm-e2e.cc
index 5d14e0f..28405cf 100644
--- a/bench/qs8-gemm-e2e.cc
+++ b/bench/qs8-gemm-e2e.cc
@@ -330,6 +330,126 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
+  static void qs8_gemm_2x8c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mlal_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mlal_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_2x8c2__neon_mlal_ld4r(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld4r,
@@ -630,6 +750,126 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
+  static void qs8_gemm_2x8c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mull_ld1r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mull_ld2r(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_2x8c2__neon_mull_ld4r(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r,
@@ -1181,6 +1421,34 @@
   BENCHMARK_QS8_END2END(qs8_gemm_4x8c2__neon_mull_dup);
   BENCHMARK_QS8_END2END(qs8_gemm_4x16c2__neon_mull_dup);
 
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mlal_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mlal_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mlal_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c2__neon_mlal_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c2__neon_mlal_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c2__neon_mlal_ld1r);
+
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mull_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mull_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mull_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c2__neon_mull_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c2__neon_mull_ld1r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c2__neon_mull_ld1r);
+
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mlal_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mlal_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mlal_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c2__neon_mlal_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c2__neon_mlal_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c2__neon_mlal_ld2r);
+
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mull_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mull_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mull_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c2__neon_mull_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c2__neon_mull_ld2r);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c2__neon_mull_ld2r);
+
   BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mlal_ld4r);
   BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mlal_ld4r);
   BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mlal_ld4r);
diff --git a/bench/qs8-gemm.cc b/bench/qs8-gemm.cc
index 574c88a..a7b4ccc 100644
--- a/bench/qs8-gemm.cc
+++ b/bench/qs8-gemm.cc
@@ -411,6 +411,134 @@
     GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_dup, 4, 16, 2, 1,
       xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
   }
+  static void qs8_gemm_1x8c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, 1, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, 2, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, 3, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, 4, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x16c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, 1, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, 2, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, 3, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mull_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, 4, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x8c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, 1, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, 2, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, 3, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, 4, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x16c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, 1, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, 2, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, 3, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mlal_ld1r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, 4, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x8c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, 1, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, 2, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, 3, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, 4, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x16c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, 1, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, 2, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, 3, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mull_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, 4, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x8c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, 1, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, 2, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, 3, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, 4, 8, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_1x16c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, 1, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, 2, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, 3, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c2__neon_mlal_ld2r(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, 4, 16, 2, 1,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_1x8c2__neon_mull_ld4r(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r, 1, 8, 2, 1,
       xnn_init_qs8_conv_minmax_rndnu_neon_params, benchmark::utils::CheckNEON);
@@ -763,6 +891,38 @@
   BENCHMARK_GEMM(qs8_gemm_2x16c2__neon_mlal_dup)
   BENCHMARK_GEMM(qs8_gemm_3x16c2__neon_mlal_dup)
   BENCHMARK_GEMM(qs8_gemm_4x16c2__neon_mlal_dup)
+  BENCHMARK_GEMM(qs8_gemm_1x8c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_2x8c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_3x8c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_4x8c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_1x16c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_2x16c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_3x16c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_4x16c2__neon_mull_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_1x8c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_2x8c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_3x8c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_4x8c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_1x16c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_2x16c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_3x16c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_4x16c2__neon_mlal_ld1r)
+  BENCHMARK_GEMM(qs8_gemm_1x8c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_2x8c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_3x8c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_4x8c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_1x16c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_2x16c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_3x16c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_4x16c2__neon_mull_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_1x8c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_2x8c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_3x8c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_4x8c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_1x16c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_2x16c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_3x16c2__neon_mlal_ld2r)
+  BENCHMARK_GEMM(qs8_gemm_4x16c2__neon_mlal_ld2r)
   BENCHMARK_GEMM(qs8_gemm_1x8c2__neon_mull_ld4r)
   BENCHMARK_GEMM(qs8_gemm_2x8c2__neon_mull_ld4r)
   BENCHMARK_GEMM(qs8_gemm_3x8c2__neon_mull_ld4r)
diff --git a/scripts/generate-qs8-gemm.sh b/scripts/generate-qs8-gemm.sh
index 32f8e0d..9cce8c3 100755
--- a/scripts/generate-qs8-gemm.sh
+++ b/scripts/generate-qs8-gemm.sh
@@ -239,69 +239,131 @@
 tools/xngen src/qs8-gemm/neon-mull-addw-dup.c.in -D MR=4 -D NR=16 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -o src/qs8-gemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c &
 
 ### C2 micro-kernels
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=0 -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=0 -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=DUP  -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=DUP  -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=0 -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=0 -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=DUP  -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=DUP  -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=0 -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=0 -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=DUP  -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=DUP  -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+
+### C2 LD1R micro-kernels
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD1R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD1R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD1R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD1R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD1R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD1R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+
+### C2 LD2R micro-kernels
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD2R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD2R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD2R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD2R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD2R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD2R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
 
 ### C2 LD4R micro-kernels
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=1 -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=1 -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD4R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD4R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=1 -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=1 -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD4R -o src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD4R -o src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
 
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=1 -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
-tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=1 -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD4R -o src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-gemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD4R -o src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
 
 ### C2S4 micro-kernels
 tools/xngen src/qs8-gemm/c2-neon-mull-shuffle.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c2s4-minmax-rndnu-neon-mull.c &
diff --git a/scripts/generate-qs8-igemm.sh b/scripts/generate-qs8-igemm.sh
index b266b6c..705c470 100755
--- a/scripts/generate-qs8-igemm.sh
+++ b/scripts/generate-qs8-igemm.sh
@@ -245,69 +245,131 @@
 tools/xngen src/qs8-igemm/neon-mull-addw-dup.c.in -D MR=4 -D NR=16 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -o src/qs8-igemm/gen/4x16-minmax-rndnu-neon-mull-addw-dup.c &
 
 ### C2 micro-kernels
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=0 -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=0 -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=DUP  -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=DUP  -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=0 -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=0 -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=DUP  -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=DUP  -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=0 -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=0 -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=DUP  -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=DUP  -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=0 -o src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -D DUP=DUP  -o src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c &
+
+### C2 LD1R micro-kernels
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD1R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD1R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD1R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD1R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD1R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD1R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD1R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c &
+
+### C2 LD2R micro-kernels
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD2R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD2R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD2R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD2R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD2R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD2R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD2R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c &
 
 ### C2 LD4R micro-kernels
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld4r.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D LD4R=1 -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -D DUP=LD4R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=1 -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D LD4R=1 -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD4R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -D DUP=LD4R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=1 -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D LD4R=1 -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD4R -o src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -D DUP=LD4R -o src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
 
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=1 -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
-tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D LD4R=1 -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD4R -o src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
+tools/xngen src/qs8-igemm/c2-neon-mull-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -D DUP=LD4R -o src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c &
 
 ### C2S4 micro-kernels
 tools/xngen src/qs8-igemm/c2-neon-mull-shuffle.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c2s4-minmax-rndnu-neon-mull.c &
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
index 754ceda..9dcb6a3 100644
--- a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..f9a2344
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,251 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..75bb9eb
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,245 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
index 9838b28..02409ac 100644
--- a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
index da522ce..2591d2a 100644
--- a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -49,7 +49,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -61,6 +60,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -71,6 +71,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -81,6 +82,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -91,6 +93,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..caac418
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,246 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..1d9ddc4
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index 6add293..877fd32 100644
--- a/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -49,7 +49,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -61,6 +60,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -71,6 +71,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -81,6 +82,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -91,6 +93,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
index 9c66620..a86d643 100644
--- a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
@@ -58,7 +58,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..a3cf92e
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,359 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(  // 2x8 QC8 GEMM, fp32 requantization, MLAL pipeline, LD1R loads of A
+    size_t mr,  // rows of A/C used; asserted <= 2
+    size_t nc,  // columns of C remaining
+    size_t kc,  // reduction length in bytes of int8
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,  // packed buffer: accumulator init, int8 weights, then per-channel fp32 scales
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // single-row case: alias row 1 onto row 0 so stores are harmless
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {  // loop over NC in tiles of 8 output columns
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulator init values packed at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // row 1 starts from the same init as row 0
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop, 2x unrolled: 16 K-bytes per row per iteration
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one int8 pair (read as s16) to all lanes
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // weights for K-pair c0, output cols 0-3
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast s16 pair as 8x int8 for vmull_s8
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening s8*s8 -> s16 products
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // fold the second unrolled K-half into the same product
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32 (sums the c2 pair)
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // single (non-unrolled) pass over 8 remaining K-bytes, vmull only
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // final K remainder: 2, 4, or 6 bytes (kc was rounded up to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses DUP (A already loaded as one vector)
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least a second K-pair remains
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // third K-pair (k == 6)
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: convert s32 accumulators to float
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);  // per-channel scales follow the weights in w
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);  // clamp in the un-zero-pointed float domain
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);  // magic-bias trick: FP add + bit reinterpret converts float to int
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);  // remove bias, apply output zero point
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // narrow s32 -> s16 by taking even halfwords
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));  // both rows packed into one q-register
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));  // AArch32 fallback: truncating narrows
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {  // full 8-column store: low half -> row 0, high half -> row 1
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A pointers for the next NC tile
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;  // unaligned 4-byte lane stores
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // shift remaining bytes into lane 0
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..4b0ce7c
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,347 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
index 33c70e1..3faf5d2 100644
--- a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -58,7 +58,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
index c055f4b..710785c 100644
--- a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -59,7 +59,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -73,6 +72,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -91,6 +91,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -109,6 +110,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -127,6 +129,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..41b87fd
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,348 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r(  // QC8 2x8 GEMM: per-channel fp32 requantization, NEONv8, 2x-unrolled MLAL, LD1R-style A broadcasts
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is processed in groups of 2 int8 values
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr == 1: alias row 1 onto row 0 so row-1 work is a harmless duplicate
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // per-channel bias leads the packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // both rows start from the same bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 A bytes per row per iteration (2x-unrolled MLAL)
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast each 2-byte (c2) group of A
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // first half of B for this iteration; second half is loaded mid-compute
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // c0 group: reuse the broadcast halves as int8 lanes
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widen-multiply, then fold the unrolled half in with MLAL
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate into int32
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);  // c1 group
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);  // c2 group
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);  // c3 group
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // single non-unrolled pass over 8 remaining A bytes per row
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // VMULL only; no second unrolled half here
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // remainder of 2..6 bytes (kc rounded to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // reads 8 bytes but consumes only k; NOTE(review): assumes A tail is safely readable
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses DUP broadcasts
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 remaining bytes: process c1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 remaining bytes: process c2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: int32 accumulators -> float
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);  // per-channel scales trail the weights
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // NEONv8 round-to-nearest-even convert back to int32
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow to 16 bits + zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to the output range
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store; rows 0 and 1 live in the low/high halves
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A for the next column block
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // rotate consumed lanes out
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..9cfc264
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,336 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);  // 2-row microkernel
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // with a single row, alias row 1 onto row 0
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from bias in w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop, 2x unrolled: 16 bytes of A per row per iteration
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast two adjacent int16 (= two int8 pairs, K-groups 0 and 1)
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // K-groups 2 and 3
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B for 8 output channels
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // K-group 0 broadcasts
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening multiply, then MLAL with second unroll
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add int16 products into int32 accumulators
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // K-group 1 broadcasts
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // K-group 2 broadcasts
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // K-group 3 broadcasts
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // remainder: one non-unrolled iteration (8 bytes of A per row)
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // tail: 2, 4 or 6 bytes of A remain (kc is rounded up to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses DUP-lane broadcasts instead of LD2R
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization path
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);  // per-channel scales follow the weights in w
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even convert (NEON v8 FCVTNS)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow + add zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));  // low half = row 0, high half = row 1
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A for the next column tile
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // shift stored lanes out
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index a11331b..8bfc34b 100644
--- a/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -59,7 +59,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -73,6 +72,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -91,6 +91,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -109,6 +110,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -127,6 +129,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
index 675fa91..12a6cd5 100644
--- a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..996ad67
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,264 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..bc913da
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 blocking: K is consumed in pairs of int8
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators seeded from packed w (bias term)
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];  // indirection pointer; the sentinel `zero` row is not offset
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 2x 8-byte blocks, MLAL pairs products before widening
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast-load two de-interleaved s16 k-pairs (c0/c1)
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // k-pairs c2/c3 of first block
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // k-pair c0, reinterpreted as 8x s8 for vmull_s8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // fuse second block into the same s16 product
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // single remaining 8-byte block (vmull only, no mlal pairing)
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // final remainder: 2, 4 or 6 bytes (kc rounded up to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder uses DUP; no broadcast load available per k-pair here
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // FP32 requantization path begins
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);  // per-channel scales follow the weights in packed w
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);  // clamp in float domain, pre-zero-point
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);  // float->int rounding via magic-bias addition
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);  // remove magic bias and fold in the output zero point
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // uzp1 keeps low s16 of each s32 lane (non-saturating narrow)
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);  // full 8-column store
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next nc tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial store: 4/2/1-byte tail, rotating lanes down after each store
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
index 3fac0e2..e987313 100644
--- a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
index 8f69175..7f49b31 100644
--- a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..c4636d4
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,259 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 blocking: K is consumed in pairs of int8
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators seeded from packed w (bias term)
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];  // indirection pointer; the sentinel `zero` row is not offset
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 2x 8-byte blocks, MLAL pairs products before widening
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one s16 k-pair per load; 4 loads cover 8 bytes
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // k-pair c0, reinterpreted as 8x s8 for vmull_s8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // fuse second block into the same s16 product
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // single remaining 8-byte block (vmull only, no mlal pairing)
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // final remainder: 2, 4 or 6 bytes (kc rounded up to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder uses DUP; broadcast load per k-pair not worthwhile here
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // FP32 requantization path begins
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);  // per-channel scales follow the weights in packed w
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // ARMv8: native round-to-nearest-even float->int conversion
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow, then add zero point
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);  // clamp in the int8 domain (unlike the magic-bias variant, which clamps in float)
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);  // full 8-column store
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next nc tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial store: 4/2/1-byte tail, rotating lanes down after each store
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..27d882e
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index 41be06b..254336d 100644
--- a/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
index f80417e..7666551 100644
--- a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..08ab2b2
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,373 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..e7de19b
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,361 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QC8 (per-channel-quantized int8) IGEMM microkernel, MR=2 x NR=8, with
+// 2-byte (c2) channel packing.  "ld2r" variant: A-matrix values are splatted
+// with vld2_dup_s16 (LD2R), i.e. 2 loads replace the 1 LD1 + 4 DUP of the
+// "dup" variant.  Auto-generated from src/qs8-igemm/c2-neon-mull-dup.c.in.
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // Pad kc so the c2 (2 int8 values per packed column entry) layout is whole.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // If only one row is valid, alias row 1 onto row 0 so its stores are harmless.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Initialize accumulators from the packed bias (first 8 int32 values of w);
+    // both rows start from the same per-column bias.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    // Walk the indirection buffer: 2 row pointers consumed per iteration.
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      // Rows pointing at the shared zero buffer must not be offset.
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      // Main loop: 16 k-values per iteration, as two MULL+MLAL halves (x0/x1).
+      while (k >= 16 * sizeof(int8_t)) {
+        // Each vld2_dup_s16 reads 4 bytes (two int8 pairs) and splats each
+        // 16-bit pair across a d-register: val[0] = pair c0/c2, val[1] = c1/c3.
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        // B columns for the first (x0) half, 8 int8 values per vector.
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // k-pair c0: widening multiply (MULL) then accumulate the second half
+        // with MLAL before pairwise-adding into the 32-bit accumulators.
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        // k-pair c1.
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        // k-pair c2.
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        // k-pair c3.
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      // 8..15 k-values left: one MULL-only pass over 8 values (no MLAL pair).
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder of 2/4/6 k-values: splat with DUP-lane (LD2R needs >= 4 bytes).
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantization (fp32 path): int32 accumulators -> float, multiply by the
+    // per-column scales packed after the weights, clamp, then convert back to
+    // int8 via the magic-bias addition/subtraction trick.
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    // Clamp in float space so min/max already account for the zero point.
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    // Adding the magic bias places the rounded integer in the low mantissa
+    // bits; the reinterpret + subtraction recovers it with the zero point.
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    // Narrow 32->16->8 bits with UZP1 (takes even 16-/8-bit lanes) on AArch64.
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    // AArch32 fallback: truncating narrows (values already clamped to range).
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      // Full 8-column store; rows written highest-first (IGEMM convention).
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection pointer for the next column tile.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail: store the final 1-7 columns in 4/2/1-byte chunks, rotating the
+      // vector with EXT so the next chunk is always in the low lanes.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
index d749b34..1717fc0 100644
--- a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
index b9180c8..d8b1658 100644
--- a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..3c07861
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,362 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..ff5f266
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,350 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index 6a911a7..78e1459 100644
--- a/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/c2-neon-mull-dup.c.in b/src/qs8-gemm/c2-neon-mull-dup.c.in
index a4ad090..81ecdb4 100644
--- a/src/qs8-gemm/c2-neon-mull-dup.c.in
+++ b/src/qs8-gemm/c2-neon-mull-dup.c.in
@@ -8,6 +8,7 @@
 $assert 8 <= NR <= 16
 $assert REQUANTIZATION in ["FP32", "GEMMLOWP", "RNDNU"]
 $assert not CHANNELWISE or REQUANTIZATION == "FP32"
+$assert DUP in ["DUP", "LD1R", "LD2R", "LD4R"]
 #include <assert.h>
 
 #include <arm_neon.h>
@@ -23,7 +24,7 @@
 $if REQUANTIZATION == "FP32" and CHANNELWISE and not ARMV8:
   $PARAMS_STRUCT = "neon_fp32"
 $ISA = "neonv8" if ARMV8 else "neon"
-void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${"ld4r" if LD4R else "dup"}(
+void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${DUP.lower()}(
     size_t mr,
     size_t nc,
     size_t kc,
@@ -78,25 +79,45 @@
     $if MLA:
       while (k >= 16 * sizeof(int8_t)) {
         $for M in range(MR):
-          $if LD4R:
+          $if DUP == "LD4R":
             const int16x4x4_t va${M}x0 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
             const int16x4x4_t va${M}x1 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+          $elif DUP == "LD2R":
+            const int16x4x2_t va${M}0x0 = vld2_dup_s16((const void*)a${M});
+            const int16x4x2_t va${M}1x0 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+            const int16x4x2_t va${M}0x1 = vld2_dup_s16((const void*)a${M});
+            const int16x4x2_t va${M}1x1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+          $elif DUP == "LD1R":
+            const int16x4_t va${M}0x0 = vld1_dup_s16((const void*)a${M});
+            const int16x4_t va${M}1x0 = vld1_dup_s16((const void*)(a${M} + 2));
+            const int16x4_t va${M}2x0 = vld1_dup_s16((const void*)(a${M} + 4));
+            const int16x4_t va${M}3x0 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
+            const int16x4_t va${M}0x1 = vld1_dup_s16((const void*)a${M});
+            const int16x4_t va${M}1x1 = vld1_dup_s16((const void*)(a${M} + 2));
+            const int16x4_t va${M}2x1 = vld1_dup_s16((const void*)(a${M} + 4));
+            const int16x4_t va${M}3x1 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
           $else:
             const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
             const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
-
         $for K in range(4):
           $for N in range(0, NR, 4):
             const int8x8_t vb${ABC[N:N+4]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
 
         $for K in range(4):
           $for M in range(MR):
-            $if LD4R:
+            $if DUP == "LD4R":
               const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}x0.val[${K}]);
               const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}x1.val[${K}]);
+            $elif DUP == "LD2R":
+              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${int(K/2)}x0.val[${K%2}]);
+              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${int(K/2)}x1.val[${K%2}]);
+            $elif DUP == "LD1R":
+              const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${K}x0);
+              const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${K}x1);
             $else:
               const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x0), ${K}));
               const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x1), ${K}));
+
           $for N in range(0, NR, 4):
             $for M in range(MR):
               int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}c${K}x0);
@@ -111,8 +132,16 @@
 
     ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
       $for M in range(MR):
-        $if LD4R:
+        $if DUP == "LD4R":
           const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+        $elif DUP == "LD2R":
+          const int16x4x2_t va${M}0 = vld2_dup_s16((const void*)a${M});
+          const int16x4x2_t va${M}1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+        $elif DUP == "LD1R":
+          const int16x4_t va${M}0 = vld1_dup_s16((const void*)a${M});
+          const int16x4_t va${M}1 = vld1_dup_s16((const void*)(a${M} + 2));
+          const int16x4_t va${M}2 = vld1_dup_s16((const void*)(a${M} + 4));
+          const int16x4_t va${M}3 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
         $else:
           const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
 
@@ -122,8 +151,12 @@
 
       $for K in range(4):
         $for M in range(MR):
-          $if LD4R:
+          $if DUP == "LD4R":
             const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}.val[${K}]);
+          $elif DUP == "LD2R":
+            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${int(K/2)}.val[${K%2}]);
+          $elif DUP == "LD1R":
+            const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${K});
           $else:
             const int8x8_t va${M}c${K} = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), ${K}));
 
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
index 903f4e9..cc9bbb1 100644
--- a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -50,7 +50,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -70,6 +69,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -88,6 +88,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -106,6 +107,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -124,6 +126,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..8a171e2
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..9e0c8a9
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,338 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r(  // 1 row x 16 cols QS8 GEMM, c2-packed weights, NEON MULL+MLAL, LD2R activation broadcasts
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout consumes K in pairs of int8, so round kc up to a multiple of 2
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the bias packed at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K per iteration — vmull on the first 8, vmlal on the second 8, one vpadal per column group
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: loads K-pairs c0 and c1 deinterleaved, each replicated across all lanes
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // K-pairs c2 and c3
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // weights for the first 8 K: 4 column-groups x 4 K-pairs
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // K-pair c0 broadcast, reinterpreted as 8 x int8 for vmull_s8
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weight loads are interleaved with the MACs to hide load latency
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add the int16 products into the int32 accumulators
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // K-pair c1 (second LD2R lane)
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // K-pair c2 (first lane of the second LD2R)
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // K-pair c3
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // remainder block of exactly 8 K: vmull only, no MLAL pairing
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R broadcast of K-pairs c0/c1
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // K-pairs c2/c3
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // tail of 2/4/6 K: fall back to dup-lane broadcasts from a single 8-byte load
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // may over-read past k bytes within the 8-byte load — NOTE(review): presumably safe per packed-activation padding; confirm with the packing code
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast K-pair 0
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 K remain: process K-pair 1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 K remain: process K-pair 2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating-doubling multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow 32->16 plus zero-point add
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);  // saturating narrow 16->8
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);  // clamp to the requested output range
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A to the row start for the next column block
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;  // unaligned 4-byte store via lane store
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
index 3f38a19..a3426d7 100644
--- a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -50,7 +50,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -70,6 +69,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -88,6 +88,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -106,6 +107,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -124,6 +126,7 @@
       vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..bb4cd72
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,239 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r(  // 1 row x 16 cols QS8 GEMM, c2 packing, rndnu requant; LD1R variant: A broadcasts via vld1_dup
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);  // single-row microkernel
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 packing: K is consumed in pairs of int8
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators initialized from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 int8 of A per iteration = 4 K-pairs (c0..c3)
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R: load one int16 (a K-pair of int8) and broadcast to all lanes
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // 16 B vectors: 4 column groups x 4 K-pairs
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // view broadcast K-pair as int8 lanes for vmull_s8
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add of int16 products accumulates both int8 of the K-pair
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // remainder: 2, 4, or 6 int8 of A left (kc is padded to even)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // remainder uses one vld1 + per-lane dup instead of LD1R
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast K-pair 0
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 int8 remain: process K-pair 1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 int8 remain: process K-pair 2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, Q31 multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling high multiply
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding shift right (negative shift amount)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // narrow to s16, add zero point (saturating)
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A for the next column tile
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..69da097
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,237 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r(  // 1 row x 16 cols QS8 GEMM, c2 packing, rndnu requant; LD2R variant: A broadcasts via vld2_dup
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);  // single-row microkernel
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 packing: K is consumed in pairs of int8
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators initialized from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 int8 of A per iteration = 4 K-pairs (c0..c3)
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: one load broadcasts two consecutive K-pairs into val[0]/val[1]
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // 16 B vectors: 4 column groups x 4 K-pairs
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // K-pair 0 (even int16 of first LD2R)
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add of int16 products accumulates both int8 of the K-pair
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // K-pair 1 (odd int16 of first LD2R)
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // K-pair 2 (second LD2R)
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);  // K-pair 3 (second LD2R)
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // remainder: 2, 4, or 6 int8 of A left (kc is padded to even)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // remainder uses one vld1 + per-lane dup instead of LD2R
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast K-pair 0
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 int8 remain: process K-pair 1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 int8 remain: process K-pair 2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, Q31 multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling high multiply
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding shift right (negative shift amount)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // narrow to s16, add zero point (saturating)
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A for the next column tile
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
index b867745..9d79067 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..ff445ee
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,250 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..ad7f675
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,244 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
index fd23a86..198c6a1 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
index 7bd75bf..b082579 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -49,7 +49,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -61,6 +60,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -71,6 +71,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -81,6 +82,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -91,6 +93,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..612fc7d
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,245 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..a4f9cb7
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,239 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index 3940a47..2947799 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -49,7 +49,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -61,6 +60,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -71,6 +71,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -81,6 +82,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -91,6 +93,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
index 8b44a28..8725410 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
index a4fd882..d05214e 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..5db72dd
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,247 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..30b732a
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,241 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
index be24df9..f75c6fc 100644
--- a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -48,7 +48,6 @@
     while (k >= 16 * sizeof(int8_t)) {
       const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -60,6 +59,7 @@
 
       const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -70,6 +70,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
       const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -80,6 +81,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
       const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -90,6 +92,7 @@
       vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
       const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..c8093fd
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,182 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..116d4be
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,180 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in int8 pairs
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    // Main loop: 8 K-bytes (4 int8 column pairs) per iteration, loaded with LD2R.
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      // Each vld2_dup above de-interleaves two consecutive 16-bit pairs and broadcasts each across all lanes.
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+    // Remainder: 2, 4, or 6 leftover K-bytes (kc was rounded up to a multiple of 2 above).
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // advance past the partial tail only
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        }
+      }
+    }
+    // Requantization (rndnu scheme): pre-shift, saturating doubling multiply-high, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+    // Write 8 outputs, or store the sub-8 tail in 4/2/1-byte pieces.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A for the next column tile
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
index 588fc17..6db02ae 100644
--- a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -62,7 +62,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -84,6 +83,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -116,6 +116,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -148,6 +149,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -180,6 +182,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..5ac7029
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,513 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..82daae8
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,501 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
index ebe509a..17789c3 100644
--- a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -62,7 +62,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -84,6 +83,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -116,6 +116,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -148,6 +149,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -180,6 +182,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..dc4797f
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r(  // 2x16 int8 GEMM microkernel, rndnu requantization; "c2" = 2 K-bytes per lane, "ld1r" = 4 broadcast loads of A instead of 1 LD1 + 4 DUP
+    size_t mr,  // rows of A/C to process (1 or 2)
+    size_t nc,  // output columns remaining
+    size_t kc,  // reduction (K) length in bytes
+    const int8_t* restrict a,  // input activations
+    size_t a_stride,  // byte stride between rows of A
+    const void* restrict w,  // packed weights: int32 bias x16, then interleaved int8 weights
+    int8_t* restrict c,  // output matrix
+    size_t cm_stride,  // byte stride between rows of C
+    size_t cn_stride,  // byte stride between 16-column tiles of C
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // pad K to a multiple of 2 to match the c2 packed-weight layout
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr == 1: alias row 1 onto row 0 so its loads/stores are harmless duplicates
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from packed per-column biases
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // row 1 starts from the same biases
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 K-bytes (4 pairs) per iteration
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one 16-bit pair (2 consecutive int8 K-values) per load
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // weights: 4 column groups x 4 K-pairs = 16 int8x8 vectors
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // view the broadcast 16-bit pair as 8 int8 lanes for vmull
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening int8 multiply -> int16
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate: sums the 2 K-values per lane into int32
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);  // K-pair 1
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);  // K-pair 2
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);  // K-pair 3
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // remainder of 2, 4, or 6 K-bytes (kc was rounded up to even)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail path broadcasts K-pairs via DUP (lanes already loaded)
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {  // second K-pair present (k >= 4)
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {  // third K-pair present (k == 6)
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // pre-shift (shift amount sign decides direction)
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply, keep high half
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // AArch64: narrow-high form avoids vcombine
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);  // ARMv7: saturating narrow + combine, then add zero point
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);  // clamp to the quantized activation range
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column tile: store and advance to next tile
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A pointers for the next column tile
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));  // pack low halves of both rows for lane stores
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;  // unaligned 4-byte lane store
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // rotate consumed bytes out
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);  // row 1's first remaining byte sits in lane 8
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..ea848a2
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,340 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
index 4829bd8..28e5e6d 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
@@ -58,7 +58,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..4a1d7ff
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,358 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..32bc22b
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,346 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(  // 2-row x 8-col QS8 GEMM, c2 packing, NEON MULL/MLAL, LD2R activation loads, fp32 requantization
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is processed (and weights are packed) in groups of 2 bytes
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr == 1: alias row 1 onto row 0 so the 2-row code path stays branch-free
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators initialized from bias packed at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop: two 8-byte K-blocks per iteration (x0 via MULL, x1 via MLAL)
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: one load replaces LD1+DUPs; de-interleaved dups land in .val[0]/.val[1]
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // K-pair c0 = .val[0] of the first LD2R
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // x1 weights loaded between MULL and MLAL to overlap load latency
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add s16 products into the s32 accumulators
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // K-pair c1 = .val[1] of the first LD2R
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // K-pair c2 = .val[0] of the second LD2R (a + 4)
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // K-pair c3 = .val[1] of the second LD2R
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // remainder: exactly one 8-byte K-block (MULL only, no MLAL pairing)
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // final remainder: k is 2, 4, or 6 bytes (kc was rounded up to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses plain DUP (single load above already has all lanes)
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 remainder bytes: process K-pair c1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 remainder bytes: process K-pair c2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: convert s32 accumulators to float
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);  // clamp in float, before the zero point is added back
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);  // magic-bias add + integer subtract implements float->int round and zero-point add
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // narrow s32->s16->s8 by truncation (values clamped above)
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {  // full 8-column store: low half -> row 0, high half -> row 1
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind activations for the next column tile
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // rotate consumed bytes out so lane indices stay fixed
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
index 5ffff20..fa9baac 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -58,7 +58,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
index b1f8513..3fb0a65 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -59,7 +59,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -73,6 +72,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -91,6 +91,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -109,6 +110,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -127,6 +129,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..c8da5e5
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,347 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r(  // 2-row x 8-col QS8 GEMM; c2 = K packed in int8 pairs; LD1R = each pair broadcast-loaded via vld1_dup_s16
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is consumed two int8s at a time (c2 packing)
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr == 1: alias row 1 onto row 0 so its results are computed but discarded in-place
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators init from per-channel bias packed ahead of the weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K per row, vmull (x0) + vmlal (x1) double pass
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one int8 pair (as int16) to all 4 lanes
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // B: 8 bytes = one K-pair (c0) for 4 output columns
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast pairs back to int8 lanes for vmull_s8
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening int8*int8 -> int16 products
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // second half of the 16-byte K block folded in before widening
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add int16 products of each K-pair into int32 accumulators
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);  // same pattern for K-pair c1
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);  // K-pair c2
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);  // K-pair c3
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // at most one half-iteration: 8 bytes of K, vmull only (no mlal second pass)
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // K remainder of 2, 4 or 6 bytes (kc was rounded up to even); uses dup-lane instead of LD1R
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // second K-pair present (k == 4 or 6)
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // third K-pair present (k == 6)
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: int32 -> float
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);  // single scale for this kernel (per-tensor; qc8 variants use per-channel)
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // NEONv8 round-to-nearest float -> int32 (the instruction this variant exists for)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow to int16, add zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // both rows packed into one int8x16
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store; rewind A pointers for the next column block
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {  // lane stores of 4/2/1 bytes; vextq rotates consumed bytes out
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..fb5921f
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,335 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index a844e75..48d6da1 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -59,7 +59,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -73,6 +72,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -91,6 +91,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -109,6 +110,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -127,6 +129,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
index f47bacf..ebaa6df 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -58,7 +58,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
index 9e0cb66..3ecec79 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -58,7 +58,6 @@
       const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
       const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..f5a97f0
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,349 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..09184f0
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,337 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+// 2x8 (MR=2, NR=8) QS8 GEMM, rndnu requantization; "c2" = K consumed as int8 pairs. LD2R variant: one vld2_dup_s16 broadcasts two adjacent K-pairs.
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is consumed two int8s at a time
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {  // mr==1: alias row 1 onto row 0 so the duplicate work and stores are harmless
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from packed per-column bias in w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+    // Main loop: 16 K-bytes per row per iteration, 2x unrolled MULL+MLAL pairs.
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // val[0]=K-pair c0, val[1]=K-pair c1, each broadcast to all lanes
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // val[0]=c2, val[1]=c3
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add s16 products into s32 accumulators
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+    // Tail of exactly 8 K-bytes: same pattern without the x2 unroll (MULL only).
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+    // Remainder of 2..6 K-bytes: single load, broadcast each pair with DUP instead of LD2R.
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+    // rndnu requantization: saturating pre-shift, saturating doubling-high multiply, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    // Narrow 32->16 (adding output zero point) then 16->8, both with saturation.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+    // Clamp to the requested output range.
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    // Store 8 outputs per row; on a full store, rewind the A pointers for the next column block.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 10df249..93ebfef 100644
--- a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -58,7 +58,6 @@
       const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
       const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -72,6 +71,7 @@
       const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
       const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -90,6 +90,7 @@
       const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
       const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -108,6 +109,7 @@
       const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
       const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -126,6 +128,7 @@
       const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
       const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..01e29d4
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,244 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..00e8a0e
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 GEMM microkernel: MR=2 rows x NR=8 columns, "c2" layout (2 bytes of K
+// per 8-bit multiply), plain vmull (no MLAL second pass), rndnu requantization.
+// LD2R variant: the A operand is loaded with vld2_dup (LD2R), replacing the
+// 1 LD1 + 4 DUP sequence of the -dup variant with 2 duplicating loads per
+// 8 bytes of A (see commit description).
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // c2 kernel: K is consumed in pairs of int8, so round kc up to a multiple of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // If only one row is valid (mr == 1), alias row 1 onto row 0 so both row
+  // computations read/write the same data.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    // Seed the accumulators with the leading 8 int32 values of the packed
+    // weights w (row 1 reuses row 0's seed values).
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+
+    // Main loop: 8 bytes of K per iteration per row.
+    while (k >= 8 * sizeof(int8_t)) {
+      // LD2R: each vld2_dup_s16 loads two adjacent int16 lanes (= two int8
+      // pairs) and broadcasts each across a 4-lane vector, so va00.val[0/1]
+      // hold the c0/c1 broadcasts and va01.val[0/1] the c2/c3 broadcasts.
+      // (a0/a1 are int8_t*; the void* cast reinterprets them as int16 pairs —
+      // LD2R tolerates the 1-byte alignment on this target, per the template.)
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+      // 8 groups of 8 packed int8 weights: columns 0-3 and 4-7 for each of
+      // the four K-pairs c0..c3.
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+      // For each K-pair: widening 8-bit multiply (vmull_s8), then pairwise
+      // add-accumulate the int16 products into the int32 accumulators
+      // (vpadalq_s16 sums each adjacent product pair, i.e. the two K bytes).
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    // K remainder: k is 2, 4, or 6 here (kc is a multiple of 2; the main loop
+    // consumed multiples of 8). The tail is read as one 8-byte vector and the
+    // needed 16-bit pairs are broadcast with vdup_lane.
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        }
+      }
+    }
+
+    // rndnu requantization: vshlq by the (presumably negative — named
+    // "right_pre_shift"; a negative vshlq count is an arithmetic right shift)
+    // pre-shift, saturating doubling multiply-high, then rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    // Narrow with saturation to int16, add the output zero point, then narrow
+    // to int8. The ARM64 path uses the *_high forms to avoid vcombine.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    // Clamp to the requested output range.
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    // Full 8-column store: row 0 in the low half, row 1 in the high half.
+    // Advance C by cn_stride and rewind A by kc for the next column block.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      // Store 4, then 2, then 1 lane(s), shifting the vector down (vextq)
+      // after each partial store; lane indices 0/2 (u32), 0/4 (u16), 0/8 (s8)
+      // pick the row-0/row-1 halves of the packed 16-byte result.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
index cbb9a08..f635ade 100644
--- a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -74,7 +74,6 @@
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
       const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
       const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -98,6 +97,7 @@
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
       const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -144,6 +144,7 @@
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
       const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -190,6 +191,7 @@
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
       const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -236,6 +238,7 @@
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
       const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..79de14d
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,686 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..87c387c
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,668 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
index 8997643..de8b456 100644
--- a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -74,7 +74,6 @@
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va2x0 = vld4_dup_s16((const void*)a2); a2 += 8;
       const int16x4x4_t va2x1 = vld4_dup_s16((const void*)a2); a2 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -98,6 +97,7 @@
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
       const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -144,6 +144,7 @@
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
       const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -190,6 +191,7 @@
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
       const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -236,6 +238,7 @@
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
       const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..6ece734
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,453 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..520d929
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,447 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r(  // QS8 GEMM microkernel: MR=3 rows x NR=16 cols, K in 2-byte (c2) groups, NEON VMULL, LD2R loads of A
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is consumed in whole 2-element int8 groups; packing pads A/W to match
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // fewer than 2 rows: alias row 1 onto row 0 so its loads/stores are harmless
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {  // fewer than 3 rows: alias row 2 onto row 1
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from 16 packed per-channel int32 biases
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // rows 1 and 2 start from the same biases
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 K elements (4 c2 groups) per iteration
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: one load broadcasts two adjacent 16-bit (2xint8) K-groups across all lanes (replaces 1 LD1 + 4 DUP)
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed weights: 16 int8 per c2 group, groups c0..c3
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // broadcast K-group 0, reinterpreted back to int8 for vmull_s8
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // int8 x int8 -> int16 products; vpadalq pairwise-accumulates into int32 lanes
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // K-group 1 from the second LD2R structure element
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // K-groups 2 and 3 come from the second LD2R load (a + 4)
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // K remainder: 2, 4, or 6 elements (kc is rounded to a multiple of 2); uses dup-lane instead of LD2R
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast remainder K-group 0 of row 0
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 remainder elements: process K-group 1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 remainder elements: process K-group 2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating-doubling multiply-high, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);  // narrow int32 -> int16 with saturation, then add output zero point
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);  // clamp results to the requested output range
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column tile: store and rewind A pointers for the next column tile
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {  // store 8 columns, then rotate the remaining 8 into position
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {  // unaligned 4-byte lane stores; vext shifts consumed bytes out
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
index ac8a2c7..ef7ade9 100644
--- a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -68,7 +68,6 @@
       const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
       const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
       const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -84,6 +83,7 @@
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
       const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -110,6 +110,7 @@
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
       const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -136,6 +137,7 @@
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
       const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -162,6 +164,7 @@
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
       const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..d751b0d
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,457 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r(  // QS8 GEMM microkernel: MR=3 rows, NR=8 cols, KR=2 (c2); rndnu requantization; NEON MLAL with LD1R (vld1_dup) broadcasts instead of LD1+DUP
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // KR=2: K dimension is consumed 2 bytes at a time
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // fewer than 2 rows: alias row 1 onto row 0 (stores are redundant but safe)
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {  // fewer than 3 rows: alias row 2 onto row 1
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from bias stored at head of packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop, 2x unrolled: 16 K-bytes per iteration via MULL+MLAL pairs
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one 16-bit lane (= one KR=2 pair of int8) per load
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B: 8 cols x 2 K-bytes (c0..c3) for unroll half 0
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast pair back to int8 for vmull_s8
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening i8*i8 -> i16; second half fused in via vmlal below
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate i16 -> i32 reduces the KR=2 pair
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // single (non-unrolled) pass over 8 remaining K-bytes, MULL only
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // K remainder of 2/4/6 bytes (kc was rounded up to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // NOTE(review): reads 8 bytes even when k < 8 — presumably relies on padded A buffers; confirm against callers
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path falls back to DUP since partial LD1R offsets would over-read
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 remainder bytes: process K pair c1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 remainder bytes: process K pair c2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, fixed-point multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply-high applies the scale
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding shift right (vrshl with negative shift)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // A64: narrow-high form avoids an extra vcombine
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // rows 0 and 1 packed in one q-register
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+
+    if (nc >= 8) {  // full 8-column tile: store and advance to next column tile
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A pointers to reuse the same rows for the next N tile
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {  // store 4 columns, then rotate the vector so the next store sees fresh lanes
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);  // lane 8 = first element of row 1's half
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..d77a597
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,439 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 874353f..b9531dc 100644
--- a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -68,7 +68,6 @@
       const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
       const int16x4x4_t va2x0 = vld4_dup_s16((const void*)a2); a2 += 8;
       const int16x4x4_t va2x1 = vld4_dup_s16((const void*)a2); a2 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -84,6 +83,7 @@
       const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
       const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -110,6 +110,7 @@
       const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
       const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -136,6 +137,7 @@
       const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
       const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -162,6 +164,7 @@
       const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
       const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..5b3cd21
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,312 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    // Main loop: consume 8 K bytes per row per iteration, as 4 pairs of 2 (c2); each LD1R broadcasts one 16-bit pair.
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      // Weights: 8x int8 per 4-column group, 2 groups for each of the 4 K pairs.
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+      // Widening multiply (vmull_s8) per K pair; vpadalq_s16 folds s16 pairs into the s32 accumulators.
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+    // Remainder: kc was rounded up to 2, so 2, 4, or 6 K bytes may remain here.
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        }
+      }
+    }
+    // Requantize (rndnu): pre-shift, saturating doubling multiply-high, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    // Store a full 8-column tile, or write the 4/2/1-column tail piecewise below.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..a797d6b
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,306 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    // Main loop: 8 K bytes per row; each vld2_dup (LD2R) broadcasts two 16-bit K pairs into val[0]/val[1].
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      // Weights: 8x int8 per 4-column group, 2 groups for each of the 4 K pairs.
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+      // Widening multiply (vmull_s8) per K pair; vpadalq_s16 folds s16 pairs into the s32 accumulators.
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+    // Remainder: kc was rounded up to 2, so 2, 4, or 6 K bytes may remain here.
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        }
+      }
+    }
+    // Requantize (rndnu): pre-shift, saturating doubling multiply-high, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    // Store a full 8-column tile, or write the 4/2/1-column tail piecewise below.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
index 638c1d6..1cc4326 100644
--- a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -86,7 +86,6 @@
       const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
       const int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
       const int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -112,6 +111,7 @@
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
       const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
       const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -172,6 +172,7 @@
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
       const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
       const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -232,6 +233,7 @@
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
       const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
       const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -292,6 +294,7 @@
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
       const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
       const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..22a91d1
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,855 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+      const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+      const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+      const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+      int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+      vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+      int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+      vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+      const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+      const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+      int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+      vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+      int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+      vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+      const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+      const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+      int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+      vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+      int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+      vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+      const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+      const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+      int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+      vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+      int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+      vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+          const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..9f7acbe
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,831 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+      const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+      const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+      const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+      int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+      vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+      vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+      int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+      vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+      vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+      const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+      const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+      int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+      vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+      vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+      int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+      vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+      vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+      const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+      const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+      int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+      vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+      vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+      int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+      vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+      vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+      const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+      const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+      int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+      vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+      vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+      int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+      vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+      vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+          const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
index 3b8caee..735eb40 100644
--- a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -86,7 +86,6 @@
       const int16x4x4_t va2x1 = vld4_dup_s16((const void*)a2); a2 += 8;
       const int16x4x4_t va3x0 = vld4_dup_s16((const void*)a3); a3 += 8;
       const int16x4x4_t va3x1 = vld4_dup_s16((const void*)a3); a3 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -112,6 +111,7 @@
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
       const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
       const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -172,6 +172,7 @@
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
       const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
       const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -232,6 +233,7 @@
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
       const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
       const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -292,6 +294,7 @@
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
       const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
       const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..66fed21
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,558 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+          const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..6a4c1b2
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,550 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+      const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+      const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+      const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+      const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+      const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+      const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+      const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+      const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+      const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+      const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+      const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+      const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+      const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+      const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+      const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+      const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+      const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+      const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+      const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+      const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+      const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+      const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+      const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+      const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+      const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+      vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+      const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+      vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+          const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+          const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+          const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+          const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
index e0be6f1..aa9399f 100644
--- a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -78,7 +78,6 @@
       const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
       const int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
       const int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -96,6 +95,7 @@
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
       const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
       const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -130,6 +130,7 @@
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
       const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
       const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -164,6 +165,7 @@
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
       const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
       const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -198,6 +200,7 @@
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
       const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
       const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..8cdfe12
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,559 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 GEMM microkernel: 4 rows (MR=4) x 8 columns (NR=8), KC unrolled by 2 (c2),
+// rndnu (rounding-doubling-high-multiply) requantization, NEON MULL/MLAL arithmetic.
+// "ld1r" variant: each pair of int8 A values is fetched with a load-and-replicate
+// (vld1_dup_s16 on the byte stream) instead of one vld1_s8 plus four vdup_lane_s16.
+// a: input rows; w: packed weights (8 x int32 bias followed by int8 weight panels);
+// c: output rows; params: requantization constants (pre-shift, multiplier, post-shift,
+// output zero point and min/max clamps).
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // c2 layout consumes K in pairs of int8, so round KC up to a multiple of 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  // Row pointer setup: when mr < 4, alias the missing rows onto the previous row
+  // so the kernel always computes 4 rows but writes/reads no out-of-bounds memory.
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    // Initialize all 4 rows of accumulators from the packed bias (first 8 int32 of w).
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    // Main loop: 16 bytes of K per iteration, as two 8-byte halves (x0/x1) combined
+    // with vmull_s8 + vmlal_s8 so the widening multiply-accumulate pairs up.
+    // Each vaRCxH register is one 2-byte (c2) A pair replicated across all 8 lanes
+    // via vld1_dup_s16 (LD1R); the int8_t* -> void* cast permits the 16-bit load
+    // from the byte stream (replicated pairs are assumed 2-byte-loadable here —
+    // standard for these generated kernels).
+    while (k >= 16 * sizeof(int8_t)) {
+      const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+      const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+      // First-half (x0) weight panels for all 4 c2 positions x 2 column groups.
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      // c2 position 0: reinterpret replicated s16 pairs back to s8 for vmull_s8.
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+      const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+      const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+      // Pairwise-add the s16 products into the s32 accumulators (sums each c2 pair).
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      // c2 position 1.
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+      const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+      const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      // c2 position 2.
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+      const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+      const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      // c2 position 3.
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+      const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+      const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    // Single 8-byte K step (MULL only, no MLAL pairing); `if` suffices because
+    // the main loop leaves k < 16, so at most one 8-byte step remains.
+    if (k >= 8 * sizeof(int8_t)) {
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    // Remainder (k in {2, 4, 6} after KC rounding): process up to 3 c2 positions
+    // with dup-lane broadcasts; A pointers are rewound by `k` so the next
+    // NC panel re-reads from the start of each row's K.
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        }
+      }
+    }
+
+    // rndnu requantization: pre-shift, saturating doubling high multiply,
+    // then rounding post-shift (vrshlq with a negative shift count rounds right).
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    // Saturating narrow s32 -> s16, add output zero point, then narrow s16 -> s8.
+    // ARM64 uses SQXTN/SQXTN2 pairs; ARMv7 uses vcombine of two narrows.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    // Full-width store: 8 columns for each of the 4 rows; rewind A pointers to
+    // the row start (they advanced by exactly kc) for the next NC panel.
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      // Store 4/2/1 bytes per row with unaligned lane stores, shifting the
+      // vector down (vextq) after each partial store.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..6caec1d
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,535 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r(  // 4x8 QS8 GEMM, c2 packing, NEON MLAL, LD2R variant
+    size_t mr,  // number of rows of A/C to process (1..4)
+    size_t nc,  // number of output columns
+    size_t kc,  // reduction dimension, in bytes
+    const int8_t* restrict a,  // input rows (row-major, a_stride apart)
+    size_t a_stride,
+    const void* restrict w,  // packed weights: per 8-column group, 8 int32 biases then int8 weight blocks
+    int8_t* restrict c,  // output rows (cm_stride apart)
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed 2 bytes at a time
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // row 1 unused: alias to row 0 so loads/stores stay in bounds
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {  // row 2 unused: alias to row 1
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {  // row 3 unused: alias to row 2
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // seed accumulators with the 8 packed biases
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K per row per iteration (2x-unrolled MULL+MLAL)
+      const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: load 2 int16 (4 bytes of A) and broadcast each across all lanes
+      const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+      const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // c0: even int16 lane, broadcast, viewed as 8x int8 pairs
+      const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+      const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+      const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+      const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+      const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+      const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+      const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+      int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // second unrolled K block fused into the same 16-bit product
+      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+      vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add int16 products into int32 accumulators
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+      int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+      vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // c1: odd int16 lane from the same LD2R
+      const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+      const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+      const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+      const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+      const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+      const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+      const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+      int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+      vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+      int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+      vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // c2: even lane of the second LD2R (bytes 4..7)
+      const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+      const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+      const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+      const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+      const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+      const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+      const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+      int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+      vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+      int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+      vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);  // c3: odd lane of the second LD2R
+      const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+      const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+      const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+      const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+      const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+      const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+      const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+      int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+      vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+      int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+      vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {  // remainder: a single 8-byte K block, MULL only (no MLAL unroll)
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // sub-8 tail: k is 2, 4, or 6 bytes (kc rounded up to a multiple of 2)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses DUP instead of LD2R
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // at least 4 tail bytes: process c1
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // 6 tail bytes: process c2
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // requantization (rndnu): pre-shift, sat-doubling multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store: rows 0/1 share one q-register, rows 2/3 the other
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);  // rewind A pointers for the next 8-column block
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // rotate unstored bytes to the front
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 88aa55f..98d5f6b 100644
--- a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -78,7 +78,6 @@
       const int16x4x4_t va2x1 = vld4_dup_s16((const void*)a2); a2 += 8;
       const int16x4x4_t va3x0 = vld4_dup_s16((const void*)a3); a3 += 8;
       const int16x4x4_t va3x1 = vld4_dup_s16((const void*)a3); a3 += 8;
-
       const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
       const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -96,6 +95,7 @@
       const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
       const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
       const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
       int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
       int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
       int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -130,6 +130,7 @@
       const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
       const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
       const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
       int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
       int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
       int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -164,6 +165,7 @@
       const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
       const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
       const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
       int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
       int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
       int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -198,6 +200,7 @@
       const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
       const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
       const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
       int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
       int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
       int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..2b24819
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,374 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 kernel consumes K in 2-byte pairs: pad an odd kc up to even
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // fewer than 2 rows: alias row 1 onto row 0 so loads/stores stay in bounds
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {  // fewer than 3 rows: alias row 2 onto row 1
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {  // fewer than 4 rows: alias row 3 onto row 2
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // seed accumulators from w (packed per-column bias -- NOTE(review): per the weight-packing layout; confirm against packing routine)
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // all rows share the same bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 bytes of A per row = 4 groups of 2 (c0..c3)
+      const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one int8 pair (read as s16) across all 4 lanes
+      const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));  // next pair (byte offsets 2, 4, 6 on an int8_t*)
+      const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+      const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+      const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+      const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+      const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+      const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+      const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+      const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+      const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+      const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+      const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+      const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+      const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+      const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // weights: K-pair c0 for columns 0-3 (8 int8 = 4 columns x 2 K values)
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // K-pair c0 for columns 4-7
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // reinterpret the broadcast s16 as 8 int8 lanes for vmull_s8
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening int8 multiply: 8 int16 products (2 per column)
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate: 2 products per column fold into one int32 lane
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va01);  // K-pair c1
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va02);  // K-pair c2
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va03);  // K-pair c3
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {  // remainder: 2, 4, or 6 bytes of K (kc was rounded up to even)
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads 8 bytes; only the first k feed the guarded multiplies below
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder uses DUP by lane instead of LD1R
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {  // second K-pair present (k >= 4)
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {  // third K-pair present (k == 6)
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // stage 1: VSHL by pre-shift (negative counts shift right)
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // stage 2: saturating doubling multiply-high by the fixed-point multiplier
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // stage 3: rounding shift by post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow 32->16, then add output zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // saturating narrow 16->8: rows 0 and 1 packed in one q-register
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);  // ARMv7 path: no *_high narrowing forms
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);  // clamp to [output_min, output_max]
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store; rewind A pointers for the next column tile
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;  // unaligned 4-byte lane store (row 1 lives in lane 2)
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);  // rotate stored bytes out
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..00d300c
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,366 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+      const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+      const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+      const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+      const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+      const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+      const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+      const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+      const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+      const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+      const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+      const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+      const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+      const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+      const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+      const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+      const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+      const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+      const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+      const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+      const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+      const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+      const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+      const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+      const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+      const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+      const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+      vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+      const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+      vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+      if (k > 2 * sizeof(int8_t)) {
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+          const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+          const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        }
+      }
+    }
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/c2-neon-mull-dup.c.in b/src/qs8-igemm/c2-neon-mull-dup.c.in
index 9b9cb02..b6d95ad 100644
--- a/src/qs8-igemm/c2-neon-mull-dup.c.in
+++ b/src/qs8-igemm/c2-neon-mull-dup.c.in
@@ -8,6 +8,7 @@
 $assert 8 <= NR <= 16
 $assert REQUANTIZATION in ["FP32", "GEMMLOWP", "RNDNU"]
 $assert not CHANNELWISE or REQUANTIZATION == "FP32"
+$assert DUP in ["DUP", "LD1R", "LD2R", "LD4R"]
 #include <assert.h>
 
 #include <arm_neon.h>
@@ -23,7 +24,7 @@
 $if REQUANTIZATION == "FP32" and CHANNELWISE and not ARMV8:
   $PARAMS_STRUCT = "neon_fp32"
 $ISA = "neonv8" if ARMV8 else "neon"
-void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${"ld4r" if LD4R else "dup"}(
+void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c2__${ISA}_${"mlal" if MLA else "mull"}_${DUP.lower()}(
     size_t mr,
     size_t nc,
     size_t kc,
@@ -86,9 +87,23 @@
       $if MLA:
         while (k >= 16 * sizeof(int8_t)) {
           $for M in range(MR):
-            $if LD4R:
+            $if DUP == "LD4R":
               const int16x4x4_t va${M}x0 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
               const int16x4x4_t va${M}x1 = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+            $elif DUP == "LD2R":
+              const int16x4x2_t va${M}0x0 = vld2_dup_s16((const void*)a${M});
+              const int16x4x2_t va${M}1x0 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+              const int16x4x2_t va${M}0x1 = vld2_dup_s16((const void*)a${M});
+              const int16x4x2_t va${M}1x1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+            $elif DUP == "LD1R":
+              const int16x4_t va${M}0x0 = vld1_dup_s16((const void*)a${M});
+              const int16x4_t va${M}1x0 = vld1_dup_s16((const void*)(a${M} + 2));
+              const int16x4_t va${M}2x0 = vld1_dup_s16((const void*)(a${M} + 4));
+              const int16x4_t va${M}3x0 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
+              const int16x4_t va${M}0x1 = vld1_dup_s16((const void*)a${M});
+              const int16x4_t va${M}1x1 = vld1_dup_s16((const void*)(a${M} + 2));
+              const int16x4_t va${M}2x1 = vld1_dup_s16((const void*)(a${M} + 4));
+              const int16x4_t va${M}3x1 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
             $else:
               const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
               const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
@@ -99,12 +114,19 @@
 
           $for K in range(4):
             $for M in range(MR):
-              $if LD4R:
+              $if DUP == "LD4R":
                 const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}x0.val[${K}]);
                 const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}x1.val[${K}]);
+              $elif DUP == "LD2R":
+                const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${int(K/2)}x0.val[${K%2}]);
+                const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${int(K/2)}x1.val[${K%2}]);
+              $elif DUP == "LD1R":
+                const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(va${M}${K}x0);
+                const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(va${M}${K}x1);
               $else:
                 const int8x8_t va${M}c${K}x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x0), ${K}));
                 const int8x8_t va${M}c${K}x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}x1), ${K}));
+
             $for N in range(0, NR, 4):
               $for M in range(MR):
                 int16x8_t vprod${M}x${ABC[N:N+4]}c${K} = vmull_s8(vb${ABC[N:N+4]}c${K}x0, va${M}c${K}x0);
@@ -119,10 +141,18 @@
 
       ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
         $for M in range(MR):
-         $if LD4R:
-           const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
-         $else:
-           const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
+          $if DUP == "LD4R":
+            const int16x4x4_t va${M} = vld4_dup_s16((const void*)a${M}); a${M} += 8;
+          $elif DUP == "LD2R":
+            const int16x4x2_t va${M}0 = vld2_dup_s16((const void*)a${M});
+            const int16x4x2_t va${M}1 = vld2_dup_s16((const void*)(a${M} + 4)); a${M} += 8;
+          $elif DUP == "LD1R":
+            const int16x4_t va${M}0 = vld1_dup_s16((const void*)a${M});
+            const int16x4_t va${M}1 = vld1_dup_s16((const void*)(a${M} + 2));
+            const int16x4_t va${M}2 = vld1_dup_s16((const void*)(a${M} + 4));
+            const int16x4_t va${M}3 = vld1_dup_s16((const void*)(a${M} + 6)); a${M} += 8;
+          $else:
+            const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
 
         $for K in range(4):
           $for N in range(0, NR, 4):
@@ -130,8 +160,12 @@
 
         $for K in range(4):
           $for M in range(MR):
-            $if LD4R:
+            $if DUP == "LD4R":
               const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}.val[${K}]);
+            $elif DUP == "LD2R":
+              const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${int(K/2)}.val[${K%2}]);
+            $elif DUP == "LD1R":
+              const int8x8_t va${M}c${K} = vreinterpret_s8_s16(va${M}${K});
             $else:
               const int8x8_t va${M}c${K} = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va${M}), ${K}));
 
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
index 4fe336c..731d298 100644
--- a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -81,6 +81,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -99,6 +100,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -117,6 +119,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -135,6 +138,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..d72f568
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,357 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..0b0c8e9
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r(  // QS8 IGEMM microkernel: MR=1 row, NR=16 columns, KR=2 (c2), rndnu requantization; LD2R variant (broadcast a-loads via vld2_dup)
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,  // indirection buffer: ks input pointers consumed per column block
+    const void* restrict w,  // packed weights: 16 x int32 bias followed by interleaved int8 B panels
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,  // pointer to the zero buffer used for padded (skipped) pixels
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is processed as int8 pairs (c2), so round kc up to a multiple of 2
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // initialize the 16 accumulators from the packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // only real (non-padding) pointers get a_offset applied; `zero` reads stay in the zero buffer
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 2x-unrolled over 8-byte k-steps, pairing MULL with MLAL
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast k-pairs c0 (val[0]) and c1 (val[1]) of step 0
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // broadcast k-pairs c2/c3 of step 0
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);  // same for unrolled step 1
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // pre-load the 16 B panels for the MULL half; MLAL-half panels are loaded inline below
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // reinterpret broadcast k-pair c0 as 8 x int8 for vmull_s8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // c0: MULL on step 0, MLAL on step 1, then widen-accumulate into s32
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // k-pair c1 came from the same LD2R as c0
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // k-pairs c2/c3 came from the second LD2R (a0 + 4)
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);  // consumed two 8-byte k-steps
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // at most one remaining full 8-byte k-step (MULL only, no MLAL pairing)
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast k-pairs c0/c1
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // broadcast k-pairs c2/c3
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail: 2, 4, or 6 leftover bytes (kc was rounded up to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // 8-byte load; the k>2/k>4 guards below select only the valid k-pairs
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses DUP (not LD2R) to broadcast each k-pair
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least 4 tail bytes: process k-pair c1
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {  // 6 tail bytes: process k-pair c2
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating doubling multiply-high, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow s32->s16 with zero-point add
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);  // AArch32 path: no *_high narrowing forms
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection pointer for the next column block
+
+      nc -= 16;
+    } else {  // partial store: emit 8/4/2/1 columns via progressively narrower lane stores
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);  // rotate consumed lanes out
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
index 23c1544..9398df3 100644
--- a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -81,6 +81,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -99,6 +100,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -117,6 +119,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -135,6 +138,7 @@
         vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..bbdf888
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,251 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..dd69de7
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,249 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
index 2eeb261..ae0b4b7 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..efeba82
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,263 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..15884fd
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
index 4ed1059..55c7a52 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
index f625f40..e80ae2b 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..0f51636
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,258 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(  // 1 row x 8 cols, K in pairs (c2); LD1R variant: 4 vld1_dup loads instead of 1 vld1 + 4 dup
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);  // single-row microkernel
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is consumed in groups of 2 int8 values
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the packed bias in w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // rows pointing at the zero buffer are not offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 A bytes/iter, MLAL-paired (x0 via vmull, x1 via vmlal)
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: load one int8 pair and broadcast it as an s16 lane
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B: 8 weights per load, columns 0-3 then 4-7 for each c
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast s16 lanes back to 8x s8 (pair repeated 4x)
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // fuse second 8-byte block into the same s16 product
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // single 8-byte block of A, vmull only (no MLAL pairing)
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder of 2, 4 or 6 bytes (kc was rounded to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses the classic dup-lane broadcast
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: s32 -> f32
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even convert back to s32 (ARMv8 FCVTNS)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow + add zero point
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);  // full 8-column store
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // tail store: 4/2/1 bytes, shifting consumed lanes out with vext
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..eadfc8f
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,252 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(  // 1 row x 8 cols, K in pairs (c2); LD2R variant: 2 vld2_dup loads replace 1 vld1 + 4 dup
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);  // single-row microkernel
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is consumed in groups of 2 int8 values
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the packed bias in w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // rows pointing at the zero buffer are not offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 A bytes/iter, MLAL-paired (x0 via vmull, x1 via vmlal)
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast two int8 pairs at once; .val[0]=c0, .val[1]=c1
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // .val[0]=c2, .val[1]=c3
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B: 8 weights per load, columns 0-3 then 4-7 for each c
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // reinterpret broadcast s16 lanes back to 8x s8 (pair repeated 4x)
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // fuse second 8-byte block into the same s16 product
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // single 8-byte block of A, vmull only (no MLAL pairing)
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder of 2, 4 or 6 bytes (kc was rounded to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses the classic dup-lane broadcast
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: s32 -> f32
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);  // round-to-nearest-even convert back to s32 (ARMv8 FCVTNS)
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow + add zero point
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);  // full 8-column store
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // tail store: 4/2/1 bytes, shifting consumed lanes out with vext
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index ee36e7e..a5bd984 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -72,6 +72,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -82,6 +83,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -92,6 +94,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -102,6 +105,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
index 209cae5..d9bf45e 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
index a6d1de7..672bcc4 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..ef00e5a
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,260 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+// 1x8c2 QS8 IGEMM (rndnu requantization); the "ld1r" variant loads activations with four vld1_dup_s16 broadcasts per 8 bytes of K instead of one vld1 plus four vdup_lane_s16.
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r(
+    size_t mr,  // rows to compute; must be 1 (asserted below)
+    size_t nc,  // output columns remaining
+    size_t kc,  // K size in bytes; rounded up to a multiple of 2 below
+    size_t ks,  // indirection-buffer span per output pixel, in bytes
+    const int8_t** restrict a,  // indirection buffer of input row pointers
+    const void* restrict w,  // packed weights: per 8-column group, 8 int32 biases then int8 octets
+    int8_t* restrict c,  // output
+    size_t cm_stride,  // output row stride (unused here: mr == 1)
+    size_t cn_stride,  // output stride between successive 8-column groups
+    size_t a_offset,  // byte offset applied to row pointers that are not `zero`
+    const int8_t* zero,  // sentinel row pointer; rows equal to it skip a_offset
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2: K is consumed in int8 pairs
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` sentinel rows are used as-is, without a_offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K as two mull+mlal halves
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // ld1r: broadcast one int8 pair per register
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);  // second 8-byte half of K
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // reinterpret broadcast pair as int8x8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening multiply, half 0
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // accumulate half 1
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise widen-add into int32 accumulators
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // one remaining 8-byte block of K: mull only, no mlal pairing
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail of 2, 4, or 6 bytes (kc is rounded to even)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads 8 bytes; only k are consumed
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses dup_lane, not ld1r
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // vshl by signed amount: negative shifts right
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply-high
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow to int16, then add zero point
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for next 8-column group
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial store: 4, then 2, then 1 byte; vext rotates remaining lanes down
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..de66b6c
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,254 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+// 1x8c2 QS8 IGEMM (rndnu requantization); the "ld2r" variant loads activations with two vld2_dup_s16 broadcasts per 8 bytes of K (consecutive int8 pairs land in .val[0]/.val[1]) instead of one vld1 plus four vdup_lane_s16.
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r(
+    size_t mr,  // rows to compute; must be 1 (asserted below)
+    size_t nc,  // output columns remaining
+    size_t kc,  // K size in bytes; rounded up to a multiple of 2 below
+    size_t ks,  // indirection-buffer span per output pixel, in bytes
+    const int8_t** restrict a,  // indirection buffer of input row pointers
+    const void* restrict w,  // packed weights: per 8-column group, 8 int32 biases then int8 octets
+    int8_t* restrict c,  // output
+    size_t cm_stride,  // output row stride (unused here: mr == 1)
+    size_t cn_stride,  // output stride between successive 8-column groups
+    size_t a_offset,  // byte offset applied to row pointers that are not `zero`
+    const int8_t* zero,  // sentinel row pointer; rows equal to it skip a_offset
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2: K is consumed in int8 pairs
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` sentinel rows are used as-is, without a_offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K as two mull+mlal halves
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // ld2r: pair 0 -> .val[0], pair 1 -> .val[1]
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // pairs 2 and 3
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);  // second 8-byte half of K
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // reinterpret broadcast pair as int8x8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening multiply, half 0
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // accumulate half 1
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise widen-add into int32 accumulators
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // one remaining 8-byte block of K: mull only, no mlal pairing
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail of 2, 4, or 6 bytes (kc is rounded to even)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads 8 bytes; only k are consumed
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses dup_lane, not ld2r
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // vshl by signed amount: negative shifts right
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply-high
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow to int16, then add zero point
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for next 8-column group
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial store: 4, then 2, then 1 byte; vext rotates remaining lanes down
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
index c42b556..cc5dc9b 100644
--- a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -71,6 +71,7 @@
 
         const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
@@ -81,6 +82,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
         const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
@@ -91,6 +93,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
         const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
@@ -101,6 +104,7 @@
         vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
         const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
         vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..f4e7c2c
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,194 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..e1127b0
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,192 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r(  // 1x8 c2-packed QS8 IGEMM microkernel: NEON MULL products, LD2R A-loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 packing: K is processed in pairs of int8, so round up to a multiple of 2
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from packed bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // the shared zero row is not offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 bytes of A (four c2 pairs) per iteration
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: broadcast pair c0 into val[0], pair c1 into val[1]
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // LD2R: pairs c2/c3
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // widen-accumulate int16 products into int32
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // K remainder of 2, 4, or 6 bytes (kc was rounded up to even)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // DUP fallback for the tail (LD2R over-reads otherwise)
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+          }
+        }
+      }
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // rndnu: pre-shift
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // rndnu: saturating doubling fixed-point multiply
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rndnu: rounding post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);  // clamp to [output_min, output_max]
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column block
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // tail: store 4 / 2 / 1 columns, rotating the vector between stores
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
index 778d81e..c2cc4ff 100644
--- a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -97,6 +97,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -129,6 +130,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -161,6 +163,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -193,6 +196,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..a171e2b
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,527 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..38d94f3
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,515 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
index b8efd96..6fb25bd 100644
--- a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -97,6 +97,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -129,6 +130,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -161,6 +163,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -193,6 +196,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..c20966e
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,357 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..1d7b9a0
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,353 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 indirect-GEMM (IGEMM) microkernel: MR=2 rows x NR=16 output channels,
+// KR=2 ("c2" weight packing), NEON MULL, LD2R variant.  The LD2R variant loads
+// activations with vld2_dup_s16, which broadcasts two 16-bit (= 2 x int8)
+// groups per load, replacing the 1x LD1 + 4x DUP sequence of the "dup" variant.
+// Requantization uses the rndnu scheme: signed pre-shift, saturating doubling
+// multiply-high, then a rounding post-shift.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // Weights are packed in groups of 2 int8 per output channel ("c2" layout),
+  // so the reduction dimension is rounded up to a multiple of 2 bytes.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // When only one row is requested, alias row 1 onto row 0 so its stores
+  // overwrite row 0 harmlessly instead of touching out-of-bounds memory.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Initialize the 2x16 int32 accumulators from the packed bias
+    // (first 16 int32 values in the weight stream); row 1 starts from
+    // the same bias as row 0.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+
+    // Indirection loop: each iteration consumes one pointer per row from the
+    // indirection buffer `a`.  Pointers equal to `zero` point at the zero
+    // buffer (implicit padding) and must NOT be offset by a_offset.
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes of K per row per iteration (4 groups c0..c3 of
+      // 2 int8 each).  vld2_dup_s16 de-interleaves and replicates: val[0]
+      // broadcasts the even 16-bit group and val[1] the odd one, so two
+      // LD2R loads per row yield all four broadcast groups.
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        // 16 weight vectors: 4 K-groups x 16 output channels (8 int8 each).
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // Group c0: widening int8 multiply, then pairwise add-accumulate
+        // (vpadalq_s16) folds each adjacent int16 pair into one int32 lane.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        // Group c1.
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        // Group c2.
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        // Group c3.
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: 2, 4 or 6 leftover bytes of K (kc was rounded up to a
+      // multiple of 2).  Falls back to LD1 + per-group vdup_lane broadcasts.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // Requantization (rndnu): shift left/right by the (signed) pre-shift,
+    // saturating doubling multiply-high by the fixed-point multiplier, then
+    // rounding shift by the (signed) post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    // Saturating narrow int32 -> int16 (adding the output zero point),
+    // then int16 -> int8.  The AArch64 path uses the *_high narrowing forms;
+    // the AArch32 path combines two 64-bit halves instead.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    // Clamp to the requested output range.
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      // Full 16-channel store; rows are written last-to-first.  Rewind the
+      // indirection buffer by ks bytes to reuse it for the next NC tile.
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      // Partial tile: pack row0/row1 halves into one register and peel off
+      // 8/4/2/1 elements; vextq rotates the remaining bytes into position.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
index e96bccf..ce8efc9 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
new file mode 100644
index 0000000..01825d8
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld1r.c
@@ -0,0 +1,372 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(  // 2 rows x 8 cols, K packed in units of 2 int8 ("c2"), LD1R broadcast variant
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);  // microkernel tile is at most 2 rows
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // round K up to the 2-byte "c2" packing unit
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: row 1 aliases row 0; its stores are overwritten by the row-0 stores below
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the bias packed at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // rows equal to the zero pointer skip the offset so padding reads the zero buffer
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 bytes of K per iteration, 2x unrolled with MULL+MLAL pairing
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: load one 16-bit unit (2 int8) and broadcast to all lanes
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // first 8 weight vectors up front; the "x1" halves are loaded inside the MAC sequence below
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);  // view the broadcast 16-bit pairs as 8 x int8 for vmull_s8
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);  // widening i8*i8 -> i16 multiply
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // second K block accumulates into the same i16 product before widening
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add adjacent i16 lanes into the i32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // remainder: one full 8-byte K block, MULL only (no MLAL pairing)
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // tail: k in {2, 4, 6} bytes (kc was rounded up to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // load 8 bytes, advance only by the true remainder
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // tail uses DUP broadcast of the already-loaded vector
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // second c2 unit present (k >= 4)
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third c2 unit present (k == 6)
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);  // fp32 requantization: convert, scale, clamp, magic-bias round
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);  // clamp in the float domain, before the zero point is added back
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));  // magic-bias addition rounds the float and exposes the integer in the mantissa bits
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);  // remove the bias and fold in the output zero point in one subtraction
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));  // A64: narrow i32 -> i16 -> i8 by taking even lanes
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));  // A32: narrow with MOVN (values are already clamped to i8 range)
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {  // full 8-column store; row 1 in the high half, row 0 in the low half
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column block
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial store: 4/2/1 columns, shifting the vector down between stores
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
new file mode 100644
index 0000000..20529b1
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld2r.c
@@ -0,0 +1,360 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
index 5b5674f..47dbc29 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-ld4r.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
index 4bdf2c5..6cec06f 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-dup.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
new file mode 100644
index 0000000..8fc47b3
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld1r.c
@@ -0,0 +1,361 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r(  // 2x8 QS8 IGEMM, c2 layout, fp32 requantization (NEONv8), MLAL main loop, LD1R A-loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is processed in pairs of int8, so round kc up to a multiple of 2
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: alias row 1 onto row 0 so its stores are harmless duplicates
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // initialize accumulators from the bias prefix of the packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // the `zero` sentinel row is used as-is; real rows are rebased by a_offset
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+      // Main loop: 16 K-elements (2x unrolled) per iteration, combined via vmull_s8 + vmlal_s8.
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: load one int8 pair as s16 and broadcast to all lanes (replaces 1 LD1 + 4 DUP)
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        // First 8 weight vectors (x0 halves); the x1 halves are loaded later, interleaved with the multiplies.
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        // K-pair c0: bitcast the broadcast s16 lanes back to int8 for the widening multiplies.
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // MLAL: fold the second unrolled half into the same s16 product
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate s16 -> s32 (sums the c2 pair per output)
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+      // Remainder: one non-unrolled pass over 8 K-elements (vmull only, no vmlal).
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+      // Residual: 2, 4, or 6 K-elements left (kc was rounded up to a multiple of 2).
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // over-reads past k; only the first k bytes are consumed below
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        // Residual path uses dup-lane broadcasts (no LD1R) since va0/va1 are already in registers.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+    // Requantization: fp32 scale, then ARMv8 round-to-nearest int conversion (vcvtnq).
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // saturating narrow s32 -> s16, add zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // rows 0 and 1 packed into one q-register
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));  // IGEMM stores rows in reverse order (c1 first)
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column tile
+
+      nc -= 8;
+    } else {
+      // Partial tile: store 4/2/1 columns via lane stores, shifting consumed bytes out with vext.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
new file mode 100644
index 0000000..90f8e0c
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld2r.c
@@ -0,0 +1,349 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
index c2d90bb..9693095 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-ld4r.c
@@ -86,6 +86,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -104,6 +105,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -122,6 +124,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -140,6 +143,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
index 59bc18a..10ec5e4 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-gemmlowp-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
index 832d222..b3d4265 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..b3d5ecb
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,363 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r(  // 2x8 QS8 IGEMM, c2 packing, NEON MLAL, LD1R (vld1_dup) loads of A
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+  // c2: channels are consumed as pairs of int8, so round kc up to 2 bytes.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;  // mr == 1: alias row 1 onto row 0; c0 is stored after c1 and wins
+  }
+  // Outer loop: one iteration per block of up to 8 output columns.
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // bias for columns 0-3
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // bias for columns 4-7
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;  // bytes of indirection pointers remaining for this output pixel
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` marks a padding row: a_offset not applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 2x-unrolled MLAL, 16 bytes of K per row
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);  // LD1R: broadcast one 16-bit lane (a pair of int8 channels)
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        // Weights: columns 0-3 / 4-7 at channel pairs c0..c3, first half of the unrolled step.
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        // Channel pair c0: reinterpret broadcast lanes back to int8x8 for vmull_s8.
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // second-half weights loaded between MULL and MLAL
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add int16 products into int32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        // Channel pair c1.
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        // Channel pair c2.
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        // Channel pair c3.
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);  // consumed 16 K bytes per row
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // tail: single non-unrolled 8-byte step (MULL only, no MLAL)
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: k is 2, 4, or 6 after kc rounding
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        // Remainder uses DUP of in-register lanes rather than LD1R broadcasts.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);  // consumed two indirection pointers
+    } while (p != 0);
+    // Requantize (rndnu): shift left (pre), saturating-doubling high multiply, rounding shift right (post).
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    // Narrow to int16 with saturation, add output zero point, then narrow to int8.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+    // Clamp both rows (packed low = row 0, high = row 1) to [output_min, output_max].
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    // Store: full 8-column block, else 4/2/1-column remainders via lane stores.
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+      // Partial block is always the last one for this kernel invocation.
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..e08d7a8
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 1261460..29c87f8 100644
--- a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -85,6 +85,7 @@
         const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
         const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -103,6 +104,7 @@
         const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
         const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -121,6 +123,7 @@
         const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
         const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
@@ -139,6 +142,7 @@
         const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
         const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..987e197
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,257 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel, 2x8 output tile, "c2" K-packing (K is consumed 2
+// int8 bytes at a time — see round_up_po2(kc, 2) below and the c0..c3 column
+// groups in the main loop).  "ld1r" variant: each 2-byte group of the A rows
+// is broadcast with vld1_dup_s16 (one LD1R per group) instead of one LD1
+// followed by four DUPs.  Requantization is the rndnu scheme: signed
+// pre-shift, saturating doubling-high multiply, rounding post-shift, add
+// output zero point, then clamp to [output_min, output_max].
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is processed in pairs of int8, so round the reduction length up to 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // With mr == 1, row 1 aliases row 0 so its stores are harmless duplicates.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Accumulators start from the bias values, which precede the packed
+    // weights in w; both output rows share the same bias.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      // A-row pointers come from the indirection buffer; entries equal to
+      // `zero` point at the shared zero buffer and must NOT be offset.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes of K (four 2-byte groups c0..c3) per iteration.
+      while (k >= 8 * sizeof(int8_t)) {
+        // LD1R: broadcast each 16-bit (2x int8) group of A across all lanes.
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+
+        // Packed weights: 8 output channels x 2 K-bytes per 8-byte vector,
+        // ordered c0..c3.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // For each group: 8x8-bit multiply to 16 bits (vmull_s8), then
+        // pairwise-add-accumulate into the 32-bit accumulators (vpadalq_s16),
+        // which sums the 2 K-bytes of the group per output channel.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: kc was rounded to a multiple of 2, so k here is 2, 4 or 6
+      // bytes; process up to three 2-byte groups via lane duplication.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // rndnu requantization: int32 accumulators -> int8 outputs.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    // Store: row 0 lives in the low half, row 1 in the high half of vout.
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection buffer for the next column block.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail: emit 4/2/1 channels via unaligned lane stores, shifting the
+      // remaining lanes down with vextq after each partial store.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..aada03a
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+// QS8 IGEMM microkernel, 2x8 output tile, "c2" K-packing (K is consumed 2
+// int8 bytes at a time).  "ld2r" variant: the A rows are broadcast with two
+// vld2_dup_s16 (LD2R) loads per row — each load fills two de-interleaved
+// broadcast vectors (.val[0]/.val[1]) — instead of one LD1 plus four DUPs.
+// Requantization is the rndnu scheme: signed pre-shift, saturating
+// doubling-high multiply, rounding post-shift, add output zero point, then
+// clamp to [output_min, output_max].
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  // K is processed in pairs of int8, so round the reduction length up to 2.
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  // With mr == 1, row 1 aliases row 0 so its stores are harmless duplicates.
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    // Accumulators start from the bias values, which precede the packed
+    // weights in w; both output rows share the same bias.
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      // A-row pointers come from the indirection buffer; entries equal to
+      // `zero` point at the shared zero buffer and must NOT be offset.
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      // Main loop: 8 bytes of K (four 2-byte groups c0..c3) per iteration.
+      while (k >= 8 * sizeof(int8_t)) {
+        // LD2R: one load broadcasts two adjacent 16-bit (2x int8) groups —
+        // va00.val[0] holds group c0, va00.val[1] holds group c1, etc.
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+
+        // Packed weights: 8 output channels x 2 K-bytes per 8-byte vector,
+        // ordered c0..c3.
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        // For each group: 8x8-bit multiply to 16 bits (vmull_s8), then
+        // pairwise-add-accumulate into the 32-bit accumulators (vpadalq_s16),
+        // which sums the 2 K-bytes of the group per output channel.
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0])
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      // Remainder: kc was rounded to a multiple of 2, so k here is 2, 4 or 6
+      // bytes; process up to three 2-byte groups via lane duplication.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+          }
+        }
+      }
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+    // rndnu requantization: int32 accumulators -> int8 outputs.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    // Store: row 0 lives in the low half, row 1 in the high half of vout.
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      // Rewind the indirection buffer for the next column block.
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      // Tail: emit 4/2/1 channels via unaligned lane stores, shifting the
+      // remaining lanes down with vextq after each partial store.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
index 7627e9c..6da9949 100644
--- a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -113,6 +113,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -159,6 +160,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -205,6 +207,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -251,6 +254,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..e73705d
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,701 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..5c61701
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,683 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
index ed90930..46c8889 100644
--- a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -113,6 +113,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -159,6 +160,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -205,6 +207,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -251,6 +254,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..e6758fe
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,467 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);  // indirection buffer is consumed 3 row pointers at a time (see p loop below)
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8 values
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;  // row 1 not requested: alias to row 0 so its stores are harmless duplicates
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;  // row 2 not requested: alias to row 1
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // packed weights start with per-channel bias
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // rows 1 and 2 start from the same bias as row 0
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // 'zero' marks a padding row and must not be offset
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R variant: load+broadcast each 2-byte pair of A (4 LD1R instead of 1 LD1 + 4 DUP)
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // B is pre-packed: per 8-int8 K-slice (c0..c3), 16 output channels
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // view the broadcast 16-bit pairs as 8 int8 lanes for vmull_s8
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add adjacent int16 products into int32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);  // K-slice c1
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);  // K-slice c2
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);  // K-slice c3
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // remainder: k is 2, 4, or 6 (kc rounded up to a multiple of 2)
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path uses DUP-from-lane instead of LD1R
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, saturating doubling multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);  // full 16-channel store; IGEMM writes rows in reverse order
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;  // tail stores in descending power-of-two chunks (8/4/2/1)
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);  // shift consumed bytes out so lane 0 holds the next chunk
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..3464de6
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,461 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r(  // QS8 IGEMM: MR=3 rows x NR=16 cols, KR=2 (c2), rndnu requantization, NEON MULL, LD2R A-loads
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);  // indirection buffer holds MR=3 pointers per K-step
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // KR=2: K is consumed 2 bytes at a time, so round up to a multiple of 2
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;  // rows beyond mr alias the previous row; their stores become harmless duplicates
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // init accumulators from packed per-channel bias at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // all rows share the same per-column bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // a_offset applies to real rows only, never to the zero buffer
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 K-bytes (4 groups of KR=2) per iteration
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: one load broadcasts two 16-bit pairs (val[0]=c0, val[1]=c1), replacing LD1 + 2 DUPs
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // second LD2R covers groups c2/c3
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B: 4 columns x KR=2 bytes per 8-byte load, 4 loads per K-group
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // group c0: broadcast K-pair as int8x8 for vmull_s8
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // 8x8->16 widening multiply; vpadal pairwise-adds into 32-bit accumulators
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // group c1 comes from the second LD2R lane of the same load
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // groups c2/c3 come from the second LD2R load (a + 4)
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // K remainder: 2, 4, or 6 bytes (kc was rounded up to a multiple of 2); falls back to LD1 + per-lane DUP
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast K-pair 0
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {  // second K-pair present (k >= 4)
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third K-pair present (k == 6)
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);  // consume MR=3 indirection pointers per pass
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // stage 1: pre-shift
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // stage 2: saturating doubling multiply-high by fixed-point multiplier
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // stage 3: rounding post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // AArch64-only vqmovn_high fuses the two-half narrow
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);  // ARMv7 path: saturating narrow 32->16, add zero point
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);  // clamp to [output_min, output_max]
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store for all 3 rows
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);  // partial store: decompose nc (1..15) into 8/4/2/1-byte pieces
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));  // pack rows 0 and 1 into one q-register for lane stores
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;  // unaligned 4-byte lane store
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);  // rotate consumed bytes out
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);  // row 1 lives in the high half of the packed q-register
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;  // partial tile is always the last one
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
index 3eb683b..15224bf 100644
--- a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -99,6 +99,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 0));
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -125,6 +126,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 1));
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -151,6 +153,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 2));
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -177,6 +180,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x0), 3));
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..b9d6b78
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,472 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+// QS8 IGEMM microkernel, MR=3 rows x NR=8 columns, K unrolled by 2 ("c2"); the LD1R variant broadcasts each 16-bit a-pair with vld1_dup_s16 instead of one LD1 plus four DUPs.
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    // Walk the indirection buffer: p counts down from ks, consuming 3 row pointers per step.
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+      // Main loop: consume 16 bytes of K per row (two 8-byte halves, x0 and x1), pairing VMULL with VMLAL.
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        // LD1R trick: each vld1_dup_s16 above broadcasts one 16-bit (c2) pair across the whole vector, replacing LD1 + DUP.
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        // x0-half weights for all 4 c2 groups loaded above; the x1-half weight vectors are loaded between multiplies below.
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+        // c2 group 0: widening multiply-accumulate into columns 0-3 and 4-7 for all 3 rows.
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+        // c2 group 1.
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+        // c2 group 2.
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+        // c2 group 3.
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+      // Remainder: exactly 8 bytes of K (VMULL only, no MLAL pairing).
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+      // Tail: 2-6 leftover bytes of K; one c2 pair per nested if, duplicated via DUP from a single 8-byte load.
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+    // Requantization (rndnu): saturating pre-shift, doubling-high multiply, rounding post-shift.
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    // Narrow 32 -> 16 -> 8 bits with saturation, adding the output zero point in the 16-bit domain.
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    // Store: full 8-column tile, or a 4/2/1-column tail via lane stores (rows written last-to-first).
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..d2c2745
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,454 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r(  // 3x8 c2 IGEMM microkernel: 2 LD2R loads replace 1 LD1 + 4 DUP for the A operand
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);  // indirection buffer holds 3 row pointers per step
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;  // mr == 1: alias row 1 onto row 0 so its stores are harmless
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;  // mr <= 2: alias row 2 onto the previous row
+  }
+
+  do {  // loop over tiles of 8 output channels
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the first 8 int32 of packed w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;  // walk the indirection buffer, 3 row pointers per step
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);  // the zero (padding) pointer is used without the offset
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {  // main loop: 16 K per iteration, double-pumped MULL+MLAL
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);  // LD2R: two int16 K-pairs, each broadcast across 4 lanes
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // packed B, 8 channels x K-pair c0..c3
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);  // K-pair c0, first 8 K
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);  // K-pair c0, second 8 K
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);  // accumulate second 8 K into the 16-bit product before widening
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate int16 -> int32
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);  // K-pair c1 comes from the second LD2R register
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);  // K-pairs c2/c3 come from the second LD2R load (a + 4)
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {  // single-pumped pass: 8 K, MULL only (no MLAL second half)
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // K remainder (2, 4 or 6 bytes after the round-up): dup-lane fallback
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // broadcast K-pair 0
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least a second K-pair remains
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third K-pair remains
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // rndnu step 1: pre-shift
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // rndnu step 2: saturating doubling multiply-high
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rndnu step 3: rounding post-shift
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // narrow to s16, add zero point
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // rows 0 and 1 packed together
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));  // clamp to [output_min, output_max]
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-channel store for all 3 rows
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next N tile
+
+      nc -= 8;
+    } else {  // tail: emit 4, 2, then 1 channel(s), shifting lanes down between steps
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
index 97f08e7..1fdfab1 100644
--- a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -99,6 +99,7 @@
         const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
         const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]);
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -125,6 +126,7 @@
         const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
         const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]);
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -151,6 +153,7 @@
         const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
         const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]);
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -177,6 +180,7 @@
         const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
         const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]);
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..8f840a0
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,326 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..3fd3d05
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,320 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+          }
+        }
+      }
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
index 502b732..fe1e9f0 100644
--- a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-dup.c
@@ -129,6 +129,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -189,6 +190,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -249,6 +251,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -309,6 +312,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..da5095d
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,871 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+        const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..2326264
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,847 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+        const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
+        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1c0x0);
+        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2c0x0);
+        int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3c0x0);
+        const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
+        vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1c0x1);
+        vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2c0x1);
+        vprod3x89ABc0 = vmlal_s8(vprod3x89ABc0, vb89ABc0x1, va3c0x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
+        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1c0x0);
+        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2c0x0);
+        int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3c0x0);
+        const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
+        vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1c0x1);
+        vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2c0x1);
+        vprod3xCDEFc0 = vmlal_s8(vprod3xCDEFc0, vbCDEFc0x1, va3c0x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
+        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1c1x0);
+        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2c1x0);
+        int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3c1x0);
+        const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
+        vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1c1x1);
+        vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2c1x1);
+        vprod3x89ABc1 = vmlal_s8(vprod3x89ABc1, vb89ABc1x1, va3c1x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
+        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1c1x0);
+        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2c1x0);
+        int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3c1x0);
+        const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
+        vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1c1x1);
+        vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2c1x1);
+        vprod3xCDEFc1 = vmlal_s8(vprod3xCDEFc1, vbCDEFc1x1, va3c1x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
+        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1c2x0);
+        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2c2x0);
+        int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3c2x0);
+        const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
+        vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1c2x1);
+        vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2c2x1);
+        vprod3x89ABc2 = vmlal_s8(vprod3x89ABc2, vb89ABc2x1, va3c2x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
+        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1c2x0);
+        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2c2x0);
+        int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3c2x0);
+        const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
+        vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1c2x1);
+        vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2c2x1);
+        vprod3xCDEFc2 = vmlal_s8(vprod3xCDEFc2, vbCDEFc2x1, va3c2x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
+        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1c3x0);
+        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2c3x0);
+        int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3c3x0);
+        const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
+        vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1c3x1);
+        vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2c3x1);
+        vprod3x89ABc3 = vmlal_s8(vprod3x89ABc3, vb89ABc3x1, va3c3x1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
+        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1c3x0);
+        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2c3x0);
+        int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3c3x0);
+        const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
+        vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1c3x1);
+        vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2c3x1);
+        vprod3xCDEFc3 = vmlal_s8(vprod3xCDEFc3, vbCDEFc3x1, va3c3x1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
index e282fba..f6c3e24 100644
--- a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -129,6 +129,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -189,6 +190,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -249,6 +251,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -309,6 +312,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..46a978f
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,573 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..a8a707c
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,565 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // c2 layout: K is consumed in pairs of int8
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // when mr < 4, alias unused row pointers to a valid earlier row
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // accumulators start from the bias stored in packed weights
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc1x89AB = vacc0x89AB;
+    int32x4_t vacc1xCDEF = vacc0xCDEF;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc2x89AB = vacc0x89AB;
+    int32x4_t vacc2xCDEF = vacc0xCDEF;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+    int32x4_t vacc3x89AB = vacc0x89AB;
+    int32x4_t vacc3xCDEF = vacc0xCDEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // "zero" marks a padding row; real rows get a_offset applied
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 K-bytes (four c2 groups) per row per iteration
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R variant: one load yields two 16-bit (c2) groups, each broadcast to all 4 lanes
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // 16 weight vectors: 4 column groups x 4 c2 groups
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // c2 group 0 of each row, reinterpreted as int8x8 for vmull_s8
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening s8*s8->s16; vpadalq pairwise-adds into the s32 accumulators
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // c2 group 1 (second element of the LD2R pair)
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+        const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+        const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // c2 group 2 (first element of the second LD2R load)
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+        const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+        const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);  // c2 group 3 (last of the 8 K-bytes)
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
+        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
+        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
+        const int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3, va3c3);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3);
+        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
+        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
+        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
+        const int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3, va3c3);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: 2..6 K-bytes left (kc was rounded up to a multiple of 2)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder path broadcasts c2 groups with DUP instead of LD2R
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
+        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
+        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
+        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
+        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
+        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
+        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
+        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
+        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
+        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0, va3c0);
+        vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0);
+        const int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0, va3c0);
+        vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least 4 K-bytes remained: process c2 group 1
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
+          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
+          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
+          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
+          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
+          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
+          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
+          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
+          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
+          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+          const int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1, va3c1);
+          vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1);
+          const int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1, va3c1);
+          vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1);
+
+          if (k > 4 * sizeof(int8_t)) {  // 6 K-bytes remained: process c2 group 2
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
+            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
+            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
+            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
+            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
+            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
+            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
+            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
+            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
+            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+            const int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2, va3c2);
+            vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2);
+            const int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2, va3c2);
+            vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, sat. doubling mul-high, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // saturating doubling multiply, keeping the high 32 bits
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // rounding right shift (negative shift amount)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);  // narrow s32->s16 with saturation, then add output zero point
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);  // clamp to the requested activation range
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {  // full 16-column store for all four rows
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column block
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));  // pack rows pairwise so the 8/4/2/1 tail stores can use lane stores
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
index 2e3feae..5aef476 100644
--- a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-dup.c
@@ -113,6 +113,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 0));
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 0));
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 0));
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -147,6 +148,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 1));
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 1));
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 1));
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -181,6 +183,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 2));
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 2));
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 2));
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -215,6 +218,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2x1), 3));
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x0), 3));
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3x1), 3));
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
new file mode 100644
index 0000000..f92e8d8
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld1r.c
@@ -0,0 +1,575 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20x0 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x0 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x0 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x0 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va20x1 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21x1 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22x1 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23x1 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30x0 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x0 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x0 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x0 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+        const int16x4_t va30x1 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31x1 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32x1 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33x1 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va21x0);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va21x1);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va31x0);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va31x1);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va22x0);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va22x1);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va32x0);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va32x1);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va23x0);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va23x1);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va33x0);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va33x1);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
new file mode 100644
index 0000000..e37d199
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld2r.c
@@ -0,0 +1,551 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20x0 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x0 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va20x1 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21x1 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30x0 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x0 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+        const int16x4x2_t va30x1 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31x1 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
+        const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
+        const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
+        const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
+        const int8x8_t va2c0x0 = vreinterpret_s8_s16(va20x0.val[0]);
+        const int8x8_t va2c0x1 = vreinterpret_s8_s16(va20x1.val[0]);
+        const int8x8_t va3c0x0 = vreinterpret_s8_s16(va30x0.val[0]);
+        const int8x8_t va3c0x1 = vreinterpret_s8_s16(va30x1.val[0]);
+
+        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
+        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
+        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
+        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3c0x0);
+        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
+        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
+        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1);
+        vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3c0x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
+        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
+        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0);
+        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3c0x0);
+        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
+        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
+        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1);
+        vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3c0x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
+        const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
+        const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
+        const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
+        const int8x8_t va2c1x0 = vreinterpret_s8_s16(va20x0.val[1]);
+        const int8x8_t va2c1x1 = vreinterpret_s8_s16(va20x1.val[1]);
+        const int8x8_t va3c1x0 = vreinterpret_s8_s16(va30x0.val[1]);
+        const int8x8_t va3c1x1 = vreinterpret_s8_s16(va30x1.val[1]);
+
+        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
+        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
+        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
+        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3c1x0);
+        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
+        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
+        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1);
+        vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3c1x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
+        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
+        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0);
+        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3c1x0);
+        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
+        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
+        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1);
+        vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3c1x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
+        const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
+        const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
+        const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
+        const int8x8_t va2c2x0 = vreinterpret_s8_s16(va21x0.val[0]);
+        const int8x8_t va2c2x1 = vreinterpret_s8_s16(va21x1.val[0]);
+        const int8x8_t va3c2x0 = vreinterpret_s8_s16(va31x0.val[0]);
+        const int8x8_t va3c2x1 = vreinterpret_s8_s16(va31x1.val[0]);
+
+        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
+        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
+        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
+        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3c2x0);
+        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
+        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
+        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1);
+        vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3c2x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
+        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
+        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0);
+        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3c2x0);
+        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
+        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
+        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1);
+        vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3c2x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
+        const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
+        const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
+        const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
+        const int8x8_t va2c3x0 = vreinterpret_s8_s16(va21x0.val[1]);
+        const int8x8_t va2c3x1 = vreinterpret_s8_s16(va21x1.val[1]);
+        const int8x8_t va3c3x0 = vreinterpret_s8_s16(va31x0.val[1]);
+        const int8x8_t va3c3x1 = vreinterpret_s8_s16(va31x1.val[1]);
+
+        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
+        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
+        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
+        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3c3x0);
+        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
+        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
+        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1);
+        vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3c3x1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
+        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
+        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0);
+        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3c3x0);
+        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
+        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
+        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1);
+        vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3c3x1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
index a957034..c247266 100644
--- a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mlal-ld4r.c
@@ -113,6 +113,7 @@
         const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]);
         const int8x8_t va3c0x0 = vreinterpret_s8_s16(va3x0.val[0]);
         const int8x8_t va3c0x1 = vreinterpret_s8_s16(va3x1.val[0]);
+
         int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
         int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
         int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0);
@@ -147,6 +148,7 @@
         const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]);
         const int8x8_t va3c1x0 = vreinterpret_s8_s16(va3x0.val[1]);
         const int8x8_t va3c1x1 = vreinterpret_s8_s16(va3x1.val[1]);
+
         int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
         int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
         int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0);
@@ -181,6 +183,7 @@
         const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]);
         const int8x8_t va3c2x0 = vreinterpret_s8_s16(va3x0.val[2]);
         const int8x8_t va3c2x1 = vreinterpret_s8_s16(va3x1.val[2]);
+
         int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
         int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
         int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0);
@@ -215,6 +218,7 @@
         const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]);
         const int8x8_t va3c3x0 = vreinterpret_s8_s16(va3x0.val[3]);
         const int8x8_t va3c3x1 = vreinterpret_s8_s16(va3x1.val[3]);
+
         int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
         int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
         int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0);
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
new file mode 100644
index 0000000..5ac9976
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld1r.c
@@ -0,0 +1,389 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r(  // 4-row x 8-col QS8 IGEMM, rndnu requantization; "c2" = 2 int8s per k-group, A broadcast via LD1R (vld1_dup)
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,  // sentinel row pointer marking padding rows in the indirection buffer
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);  // indirection buffer advances in whole groups of 4 row pointers
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // pad K up to a whole number of 2-byte k-groups
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;  // mr < 2: row 1 aliases row 0, so its stores just rewrite row 0
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;  // mr <= 2: row 2 aliases the last real row
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;  // mr < 4: row 3 aliases the last real row
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // row-0 accumulators start from the 8 int32 biases at the head of w
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // all rows share the same per-column bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;  // bytes of indirection-pointer entries left for this output tile
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {  // a_offset applies only to real rows, never to the shared zero sentinel
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;  // consumed 4 row pointers
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 bytes (4 k-groups) of A per row per iteration
+        const int16x4_t va00 = vld1_dup_s16((const void*)a0);  // LD1R: load one 2-byte k-group and broadcast it to all 4 s16 lanes
+        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
+        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
+        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
+        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
+        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
+        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
+        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
+        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
+        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
+        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
+        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;
+        const int16x4_t va30 = vld1_dup_s16((const void*)a3);
+        const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2));
+        const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4));
+        const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // B weights: per k-group, 8 int8s for cols 0-3 then 8 for cols 4-7
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);  // view the broadcast s16 pairs as int8x8 for vmull_s8
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening 8x8 -> 16-bit multiply, k-group 0
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise-add adjacent 16-bit products into 32-bit column accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va31);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va32);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va33);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);  // 8 bytes of A consumed per row
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: 1-3 k-groups left (k is 2, 4, or 6 after rounding)
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);  // loads 8 bytes though only k are valid; advances by the true remainder (assumes padded input - TODO confirm)
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // dup lane 0 = first remaining k-group (remainder path uses DUP, not LD1R)
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // at least 2 k-groups remain
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // all 3 remaining k-groups present
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);  // one group of 4 row pointers consumed
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization parameters, broadcast to all lanes
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);  // step 1: saturating pre-shift (vshl with negative amounts shifts right)
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);  // step 2: saturating doubling multiply-high by the fixed-point multiplier
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);  // step 3: rounding post-shift (negative amounts = rounding right shift)
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);  // added while narrowing 32 -> 16 with saturation
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // pack rows 0|1 and 2|3 into 16-byte vectors (saturating 16 -> 8)
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);  // clamp packed results to the requested output range
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column tile: one 8-byte store per row (IGEMM writes rows high-to-low)
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind the indirection buffer for the next column tile
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {  // partial tile: emit 4/2/1 columns, shifting the packed vector down after each store
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {  // lane 8 = first byte of the high (odd-row) half, lane 0 = first byte of the low half
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
new file mode 100644
index 0000000..7ee9d66
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c2-minmax-rndnu-neon-mull-ld2r.c
@@ -0,0 +1,381 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r(  // 4x8 QS8 indirect GEMM, c2-packed weights, rndnu requantization; A broadcast via LD2R (2 vld2_dup per 8 bytes) instead of 1 LD1 + 4 DUP
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);  // this variant handles up to 4 output rows
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);  // indirection buffer advances 4 pointers (one per row) per k-slice
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  kc = round_up_po2(kc, 2 * sizeof(int8_t));  // K is processed in c2 (2-byte) chunks; round up so the remainder logic below is exact
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {  // mr < 4: alias the out-of-range row pointers to the last valid row so stores are harmless duplicates
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));  // packed weights start with 8 int32 biases for this column block
+    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
+    int32x4_t vacc1x0123 = vacc0x0123;  // all rows start from the same bias
+    int32x4_t vacc1x4567 = vacc0x4567;
+    int32x4_t vacc2x0123 = vacc0x0123;
+    int32x4_t vacc2x4567 = vacc0x4567;
+    int32x4_t vacc3x0123 = vacc0x0123;
+    int32x4_t vacc3x4567 = vacc0x4567;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];  // fetch per-row input pointers from the indirection buffer
+      if XNN_UNPREDICTABLE(a0 != zero) {  // `zero` marks padding rows; real pointers get rebased by a_offset
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {  // main loop: 8 bytes of A per row = 4 c2 chunks (c0..c3) per iteration
+        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);  // LD2R: val[0] broadcasts chunk c0, val[1] broadcasts chunk c1; void* cast since a0 is only byte-aligned — NOTE(review): presumably to sidestep int16_t* alignment assumptions, confirm against generator template
+        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;  // second LD2R covers chunks c2 (val[0]) and c3 (val[1])
+        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
+        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
+        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
+        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
+        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
+        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));  // 8 weight vectors: columns 0-3 and 4-7 for each of the 4 c2 chunks
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);  // reinterpret broadcast 16-bit chunk as 4 repeated int8 pairs
+        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);
+
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);  // widening s8*s8 -> s16 products
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);  // pairwise add-accumulate folds the 2 products per c2 chunk into s32 accumulators
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);  // chunk c1 = second lane of the first LD2R
+        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
+        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
+        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);
+
+        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);  // chunk c2 = first lane of the second LD2R
+        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
+        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
+        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);
+
+        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);  // chunk c3 = second lane of the second LD2R
+        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
+        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
+        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);
+
+        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
+        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
+        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
+        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
+        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
+        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
+        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
+        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {  // remainder: 2 <= k < 8 (kc rounded to multiple of 2), one c2 chunk per nested level via dup_lane
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));  // remainder uses DUP broadcast (lane 0 = chunk c0) instead of LD2R
+        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
+        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
+        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
+        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
+        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
+        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
+        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
+        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
+        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
+        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
+        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
+        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
+        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
+        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
+        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
+        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
+
+        if (k > 2 * sizeof(int8_t)) {  // second c2 chunk (k >= 4)
+          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
+          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
+          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
+          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
+          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
+          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
+          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
+          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
+          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
+          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
+          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
+          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
+          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
+          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
+          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
+          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
+          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
+
+          if (k > 4 * sizeof(int8_t)) {  // third c2 chunk (k >= 6)
+            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
+            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
+            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
+            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
+            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
+            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
+            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
+            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
+            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
+            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
+            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
+            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
+            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
+            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
+            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
+            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
+            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
+            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
+            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
+            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
+          }
+        }
+      }
+      p -= 4 * sizeof(void*);  // consumed 4 indirection pointers this k-slice
+    } while (p != 0);
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);  // rndnu requantization: pre-shift, sat-doubling-high multiply, rounding post-shift
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);  // AArch64: narrow-high form avoids a separate vcombine
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);  // pack rows 0+1 (low/high halves) into one q-register
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);  // clamp to [output_min, output_max]
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {  // full 8-column store; rows written high-to-low (c3 first, matching IGEMM store order)
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);  // rewind indirection buffer for the next column block
+
+      nc -= 8;
+    } else {  // tail: emit 4/2/1-column pieces via unaligned lane stores, shifting packed bytes down with vext
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index a6e074d..3819681 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -802,6 +802,54 @@
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_gemmlowp_ukernel_1x8c2__neon_mlal_dup)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x8c2__neon_mlal_dup)
 
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r)
+
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r)
@@ -1228,6 +1276,18 @@
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_dup)
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_dup)
 
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r)
+
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r)
+
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r)
+
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r)
+
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r)
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld4r)
 
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 340e47e..602bd14 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -628,6 +628,54 @@
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_gemmlowp_ukernel_1x8c2__neon_mlal_dup)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_gemmlowp_ukernel_2x8c2__neon_mlal_dup)
 
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r)
+
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld4r)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r)
@@ -985,6 +1033,18 @@
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_dup)
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_dup)
 
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r)
+
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r)
+
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r)
+
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r)
+
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r)
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld4r)
 
diff --git a/test/qc8-gemm-minmax-fp32.cc b/test/qc8-gemm-minmax-fp32.cc
index 3b10bc9..ece4187 100644
--- a/test/qc8-gemm-minmax-fp32.cc
+++ b/test/qc8-gemm-minmax-fp32.cc
@@ -23,6 +23,3654 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so n > nr was never tested
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so n > nr was never tested
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so multi-tile n was never tested
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was unused, so n > nr was never tested
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was unused, so n > nr was never tested
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was unused, so multi-tile n was never tested
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // NOTE(review): repetitive template-expanded tests (appears auto-generated) —
+  // prefer fixing the generator over hand-editing these. TODO confirm generator source.
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)); n in 9..15 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)), unlike the
+          // n_div_8_strided_cn sibling below which passes .n(n) — confirm.
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)); n=16,24 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // NOTE(review): section covers the 2x8c2 NEON MLAL LD2R GEMM micro-kernel;
+  // tests appear auto-generated — prefer fixing the generator over hand-edits.
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          // NOTE(review): loop variable n is unused here (.n(8)); n in 9..15 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          // NOTE(review): loop variable n is unused here (.n(8)), unlike the
+          // n_div_8_strided_cn sibling below which passes .n(n) — confirm.
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          // NOTE(review): loop variable n is unused here (.n(8)); n=16,24 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // NOTE(review): NEONv8 variant of the section above; uses
+  // xnn_init_qs8_minmax_neon_params (not the _fp32_ init) — consistent within
+  // this section, presumably intentional for the NEONv8 code path; confirm.
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)); n in 9..15 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)) — confirm.
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          // NOTE(review): loop variable n is unused here (.n(8)); n=16,24 is
+          // never exercised — presumably a generator quirk, confirm intent.
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QC8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD4R, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qc8-gemm-minmax-fp32.yaml b/test/qc8-gemm-minmax-fp32.yaml
index c0face6..6edbfcd 100644
--- a/test/qc8-gemm-minmax-fp32.yaml
+++ b/test/qc8-gemm-minmax-fp32.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
 - name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r
   init: xnn_init_qs8_minmax_neon_fp32_params
   k-block: 16
diff --git a/test/qc8-igemm-minmax-fp32.cc b/test/qc8-igemm-minmax-fp32.cc
index ba61a28..30d015b 100644
--- a/test/qc8-igemm-minmax-fp32.cc
+++ b/test/qc8-igemm-minmax-fp32.cc
@@ -23,6 +23,3750 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Partial-tile sweep with n a multiple of NR (16, 24): each (m, n, k) case
+  // runs a single tester iteration, forwarding n to the tester via .n(n).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirection-kernel variant (ks = 3) with the full 1x8 output tile,
+  // sweeping k from 1 to 80 in steps of 17.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // ks = 3 combined with every partial (m, n) output tile, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // ks = 3 with n in (8, 16). The loop variable n must be forwarded via
+  // .n(n); a fixed .n(8) would make the n loop a no-op and the n > 8
+  // remainder path would never be exercised (compare n_gt_8_subtile above).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // ks = 3 with n a multiple of NR (16, 24). Forward the loop variable via
+  // .n(n); a fixed .n(8) would leave the n loop without effect so multi-tile
+  // outputs would never be tested (n_div_8_subtile above uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Partial (m, n) tiles written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirect input addressed through a nonzero a_offset (83 > max m*ks*k elements).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Zero-pointer substitution: every row index mz is tried as the zero row.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Output clamping with an elevated lower bound (qmin = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Output clamping with a lowered upper bound (qmax = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Full tile written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Baseline: full 2x8 tile with k fixed at 16.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Full tile with a non-default cn_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // k = 16 across every partial (m, n) tile, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // k = 16, varying only the row count m.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 16, varying only the column count n.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Remainder handling below the 16-element k block: k = 1..15.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 1..15 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // One full k block plus a remainder: k = 17..31.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 17..31 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Whole multiples of the k block: k = 32..160 step 16.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k multiples of 16 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n in (8, 16): forward the loop variable via .n(n). A fixed .n(8) would
+  // make the n loop a no-op, never exercising the n > 8 path (n_gt_8_subtile
+  // below correctly uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n in (8, 16) with non-default cn_stride. Forward the loop variable via
+  // .n(n); a fixed .n(8) leaves the n loop with no effect (contrast
+  // n_div_8_strided_cn below, which uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n in (8, 16) across every row count m, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n a multiple of NR (16, 24): forward the loop variable via .n(n). A fixed
+  // .n(8) would make the n loop a no-op and skip the multi-tile case
+  // (n_div_8_strided_cn below correctly uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n a multiple of NR with non-default cn_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n a multiple of NR across every row count m, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirection-kernel variant (ks = 3) with the full 2x8 output tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // ks = 3 combined with every partial (m, n) output tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // ks = 3 with n in (8, 16). Forward the loop variable via .n(n); a fixed
+  // .n(8) would leave the n loop without effect.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // ks = 3 with n a multiple of NR (16, 24). Forward the loop variable via
+  // .n(n); a fixed .n(8) would leave the n loop without effect.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Partial (m, n) tiles written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirect input addressed through a nonzero a_offset (163 > max m*ks*k elements).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Zero-pointer substitution: every row index mz (0, 1) is tried as the zero row.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Output clamping with an elevated lower bound (qmin = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Output clamping with a lowered upper bound (qmax = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Full tile written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Baseline: full 1x8 tile with k fixed at 16 (NEONv8 kernel variant).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Full tile with a non-default cn_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // k = 16 across every partial (m, n) tile, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // k = 16, varying only the row count m.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 16, varying only the column count n.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Remainder handling below the 16-element k block: k = 1..15.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 1..15 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // One full k block plus a remainder: k = 17..31.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k = 17..31 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Whole multiples of the k block: k = 32..160 step 16.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k multiples of 16 across every partial (m, n) tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n in (8, 16): forward the loop variable via .n(n). A fixed .n(8) would
+  // make the n loop a no-op and the n > 8 path would never be exercised.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n in (8, 16) with non-default cn_stride. Forward the loop variable via
+  // .n(n); a fixed .n(8) leaves the n loop with no effect (contrast
+  // n_div_8_strided_cn below, which uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n in (8, 16) across every row count m, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n a multiple of NR (16, 24): forward the loop variable via .n(n). A fixed
+  // .n(8) would make the n loop a no-op and skip the multi-tile case
+  // (n_div_8_strided_cn below correctly uses .n(n)).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n a multiple of NR with non-default cn_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n a multiple of NR across every row count m, one iteration each.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirection-kernel variant (ks = 3) with the full 1x8 output tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // ks = 3 combined with every partial (m, n) output tile.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // ks = 3 with n in (8, 16). Forward the loop variable via .n(n); a fixed
+  // .n(8) would leave the n loop without effect.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // ks = 3 with n a multiple of NR (16, 24). Forward the loop variable via
+  // .n(n); a fixed .n(8) would leave the n loop without effect.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Partial (m, n) tiles written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Indirect input addressed through a nonzero a_offset (83 > max m*ks*k elements).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Zero-pointer substitution: every row index mz is tried as the zero row.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // Output clamping with an elevated lower bound (qmin = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Output clamping with a lowered upper bound (qmax = 128).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // Full tile written with a non-default cm_stride (11).
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+// Tests for the LD1R variant of the QC8 IGEMM 2x8c2 NEON v8 microkernel
+// (mr=2, nr=8, kr=2, sr=1; k-block of 16 for the MLAL main loop).
+// NOTE(review): auto-generated-looking test file — fix issues in the
+// generator, not here.
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): in n_gt_8 / n_gt_8_strided_cn / n_div_8 / n_div_8_small_kernel
+  // below, the loop variable `n` is not forwarded to `.n()` (the tester is run
+  // with n fixed at 8) — presumably intentional in the generator template,
+  // but worth confirming upstream.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // ks(3) exercises the IGEMM indirection with a 3-element kernel window.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // IGEMM-specific: offset applied to the indirection (A) pointers.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // IGEMM-specific: zero-buffer substitution for each of the 2 rows.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+// Tests for the LD2R variant of the QC8 IGEMM 1x8c2 microkernel on baseline
+// NEON (non-v8): note the different init function
+// (xnn_init_qs8_minmax_neon_fp32_params) versus the NEON v8 variants above.
+// NOTE(review): auto-generated-looking test file — fix issues in the
+// generator, not here.
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): in n_gt_8 / n_gt_8_strided_cn / n_div_8 / n_div_8_small_kernel
+  // below, the loop variable `n` is not forwarded to `.n()` (the tester is run
+  // with n fixed at 8) — presumably intentional in the generator template,
+  // but worth confirming upstream.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // ks(3) exercises the IGEMM indirection with a 3-element kernel window.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // IGEMM-specific: offset applied to the indirection (A) pointers.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // IGEMM-specific: zero-buffer substitution for the single row.
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Head of the LD2R 2x8c2 baseline-NEON IGEMM test section (mr=2, nr=8,
+  // kr=2, sr=1; k-block of 16).  NOTE(review): auto-generated-looking test
+  // file — fix issues in the generator, not here.
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QC8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD4R, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qc8-igemm-minmax-fp32.yaml b/test/qc8-igemm-minmax-fp32.yaml
index 9983200..5d1f40a 100644
--- a/test/qc8-igemm-minmax-fp32.yaml
+++ b/test/qc8-igemm-minmax-fp32.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
 - name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r
   init: xnn_init_qs8_minmax_neon_fp32_params
   k-block: 16
diff --git a/test/qs8-gemm-minmax-fp32.cc b/test/qs8-gemm-minmax-fp32.cc
index a8938f7..3b95514 100644
--- a/test/qs8-gemm-minmax-fp32.cc
+++ b/test/qs8-gemm-minmax-fp32.cc
@@ -23,6 +23,3654 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was dead, so n > nr was never exercised
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was dead; cn_stride with n > nr went untested
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8): loop variable was dead, so multi-block n (16, 24) went untested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was dead, so n > nr was never exercised
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was dead; cn_stride with n > nr went untested
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was dead, so multi-block n (16, 24) went untested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // Tests for the QS8 GEMM 2x8c2 NEONv8 FP32 microkernel, MLAL + LD1R variant.
+  // NOTE(review): this suite mirrors the sibling LD2R/LD4R suites line for line and
+  // appears template-generated — presumably edits belong in the generator; confirm
+  // before hand-editing.
+  // k in (16, 32): one full 16-element block plus a remainder, over every (m, n) subtile.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // k a multiple of 16 (32..160): exact whole-block iteration counts, no remainder.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n > nr (9..15): output wider than one register tile.
+  // NOTE(review): the n loop variable is unused below (.n(8) is passed) — presumably
+  // a generator quirk shared by all sibling suites; confirm upstream.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n a multiple of nr (16, 24).
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Non-default output row stride over every (m, n) subtile.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Output clamping: raised lower bound (qmin) and lowered upper bound (qmax).
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Tests for the QS8 GEMM 1x8c2 NEON FP32 microkernel, MLAL + LD2R variant
+  // (mr=1, nr=8, kr=2, sr=1; unrolled block size 16 along k).
+  // NOTE(review): template-generated suite — presumably changes belong in the
+  // generator; confirm before hand-editing.
+  // k == 16: exactly one unrolled block.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k < 16: shorter-than-one-block remainder path.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // k in (16, 32): one full block plus a remainder.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // k a multiple of 16 (32..160): exact whole-block iteration counts.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n > nr (9..15).
+  // NOTE(review): the n loop variable is unused below (.n(8) is passed) — presumably
+  // a generator quirk shared by all sibling suites; confirm upstream.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n a multiple of nr (16, 24).
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Non-default output row stride over every (m, n) subtile.
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // Output clamping: raised lower bound (qmin) and lowered upper bound (qmax).
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Tests for the QS8 GEMM 2x8c2 NEON FP32 microkernel, MLAL + LD2R variant
+  // (mr=2, nr=8, kr=2, sr=1; unrolled block size 16 along k).
+  // NOTE(review): template-generated suite — presumably changes belong in the
+  // generator; confirm before hand-editing.
+  // k == 16: exactly one unrolled block.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // k < 16: shorter-than-one-block remainder path.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // k in (16, 32): one full block plus a remainder.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // k a multiple of 16 (32..160): exact whole-block iteration counts.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n > nr (9..15).
+  // NOTE(review): the n loop variable is unused below (.n(8) is passed) — presumably
+  // a generator quirk shared by all sibling suites; confirm upstream.
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n a multiple of nr (16, 24).
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_GEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD4R, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-gemm-minmax-fp32.yaml b/test/qs8-gemm-minmax-fp32.yaml
index dff0048..31aee5a 100644
--- a/test/qs8-gemm-minmax-fp32.yaml
+++ b/test/qs8-gemm-minmax-fp32.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
 - name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r
   init: xnn_init_qs8_conv_minmax_fp32_neon_params
   k-block: 16
diff --git a/test/qs8-gemm-minmax-rndnu.cc b/test/qs8-gemm-minmax-rndnu.cc
index a096fbf..5e83d3f 100644
--- a/test/qs8-gemm-minmax-rndnu.cc
+++ b/test/qs8-gemm-minmax-rndnu.cc
@@ -23,6 +23,14598 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8) {  // NOTE(review): k-dimension coverage for the 3x8c2 ld1r kernel; presumably generator-emitted
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)  // 11 > nr: exercises non-contiguous output columns
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .a_stride(11)  // 11 > k: exercises non-contiguous A rows
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8) {  // verifies n > nr handling of the 3x8c2 ld1r kernel
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(8): loop variable n was unused, so n > nr was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {  // n > nr combined with a strided C matrix
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(8): loop variable n was unused (sibling n_gt_8_strided_a correctly uses .n(n))
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_strided_a) {  // n > nr with strided A; note this one does use the loop variable via .n(n)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8) {  // verifies multi-tile output (n a multiple of nr)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(8): loop variable n was unused (sibling n_div_8_strided_cn correctly uses .n(n))
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {  // remaining n/stride/clamp coverage for the 3x8c2 ld1r kernel
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8) {  // NOTE(review): k-dimension coverage for the 4x8c2 ld1r kernel; mirrors the 3x8c2 suite
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8) {  // verifies n > nr handling of the 4x8c2 ld1r kernel
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was unused, so n > nr was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {  // n > nr combined with a strided C matrix
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was unused (sibling n_gt_8_strided_a correctly uses .n(n))
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_strided_a) {  // n > nr with strided A; note this one does use the loop variable via .n(n)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8) {  // verifies multi-tile output (n a multiple of nr)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was unused (sibling n_div_8_strided_cn correctly uses .n(n))
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {  // remaining n/stride/clamp coverage for the 4x8c2 ld1r kernel
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8) {  // NOTE(review): k-dimension coverage for the 1x16c2 ld1r kernel (nr=16)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)  // 19 > nr(16): exercises non-contiguous output columns
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8) {  // K exactly one 8-element unroll, full 2x16 tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cn) {  // strided output columns at the full tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)  // prime stride > nr
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_strided_a) {  // k == 8 with A rows strided wider than k
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .a_stride(11)  // prime stride > k
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {  // every m x n subtile at k == 8
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {  // partial m with full n at k == 8
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {  // partial n with full m at k == 8
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_lt_8) {  // remainder path: K below the 8-element unroll
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_lt_8_strided_a) {  // k < 8 with A rows strided wider than k
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(11)  // prime stride > max k exercised
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {  // every m x n subtile for k < 8
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_gt_8) {  // one full unroll plus a remainder: k in 9..15
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_gt_8_strided_a) {  // k in 9..15 with strided A rows
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)  // prime stride > max k exercised
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {  // every m x n subtile for k in 9..15
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_div_8) {  // whole multiples of the unroll: k = 16..80 step 8
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_div_8_strided_a) {  // k multiple of 8 with strided A rows
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(83)  // prime stride > max k exercised
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_div_8_subtile) {  // every m x n subtile for k multiple of 8
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16) {  // intended: n past one nr tile (17..31)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)  // NOTE(review): loop index n is unused, so each pass retests n == nr; matches sibling generated suites -- confirm generator intent
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {  // n > nr with strided output columns
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)  // NOTE(review): loop index n is unused here too -- confirm generator intent
+          .k(k)
+          .cn_stride(19)  // prime stride > nr
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_strided_a) {  // n = 17..31 with strided A rows
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)  // prime stride > max k exercised
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {  // n = 17..31 across all m subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16) {  // intended: n a multiple of nr (32, 48)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)  // NOTE(review): loop index n is unused, so each pass retests n == nr -- confirm generator intent
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {  // n multiple of nr with strided output columns
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)  // prime stride > nr
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_strided_a) {  // n multiple of nr with strided A rows
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)  // prime stride > max k exercised
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_subtile) {  // n multiple of nr across all m subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cm_subtile) {  // strided output rows across all m x n subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)  // prime stride > nr
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, qmin) {  // output clamped at an elevated lower bound
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, qmax) {  // output clamped at a lowered upper bound
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cm) {  // strided output rows at the full tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)  // prime stride > nr
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies outputs wider than one full register tile (n > nr = 16).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): loop variable was dead, so n > 16 was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Verifies n > nr combined with a non-contiguous output column stride.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): dead loop variable; mirrors n_div_16_strided_cn which uses .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n > nr combined with a strided A matrix (a_stride 43 > max k of 40);
+  // note this variant correctly passes the loop variable to .n(n).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies n an exact multiple of nr (2 and 3 full output tiles).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): the n = 32 / 48 cases were never actually exercised
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // qmin(128) raises the output minimum above the midpoint of the quantized
+  // range, exercising the kernel's lower-bound clamping path.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // qmax(128) lowers the output maximum below the top of the quantized
+  // range, exercising the kernel's upper-bound clamping path.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies outputs wider than one full register tile (n > nr = 16).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): loop variable was dead, so n > 16 was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Verifies n > nr combined with a non-contiguous output column stride.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): dead loop variable; mirrors n_div_16_strided_cn which uses .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies n an exact multiple of nr (2 and 3 full output tiles).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): the n = 32 / 48 cases were never actually exercised
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies outputs wider than one full register tile (n > nr = 8).
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was dead, so n > 8 was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Verifies n > nr combined with a non-contiguous output column stride.
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): dead loop variable; mirrors n_div_8_strided_cn which uses .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {  // n a multiple of nr (16, 24) with A row stride 83 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {  // every partial-m tile at n in {16, 24}, 1 iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {  // every m x n subtile with output row stride 11 > nr
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, qmin) {  // output clamping at a raised lower bound (qmin = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, qmax) {  // output clamping at a lowered upper bound (qmax = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cm) {  // full tile with output row stride 11 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16) {  // baseline: full 4x8 tile, k exactly 16
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cn) {  // output column stride 11 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {  // A row stride 19 > k
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {  // every m x n subtile at k == 16, 1 iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {  // partial-m tiles at full n
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {  // partial-n tiles at full m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_lt_16) {  // k = 1..15 (below 16)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {  // k = 1..15 with A row stride 19
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {  // k = 1..15 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_gt_16) {  // k = 17..31 (one full 16 pass plus remainder)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {  // k = 17..31 with A row stride 37
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {  // k = 17..31 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_div_16) {  // k = 32, 48, ..., 160 (multiples of 16, no remainder)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_div_16_strided_a) {  // k multiples of 16 with A row stride 163 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {  // k multiples of 16 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n is unused; n pinned at nr — appears to match the generator template, confirm upstream
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n is unused here too — confirm against generator
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_strided_a) {  // n = 9..15 with A row stride 83 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {  // n = 9..15 across every partial-m tile, 1 iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n is unused; n pinned at nr — appears to match the generator template, confirm upstream
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {  // n in {16, 24} with output column stride 11
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_strided_a) {  // n in {16, 24} with A row stride 83 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {  // n in {16, 24} across every partial-m tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {  // every m x n subtile with output row stride 11 > nr
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, qmin) {  // output clamping at a raised lower bound (qmin = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, qmax) {  // output clamping at a lowered upper bound (qmax = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cm) {  // full tile with output row stride 11 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16) {  // baseline: full 1x16 tile, k exactly 16
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cn) {  // output column stride 19 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {  // A row stride 19 > k
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {  // every m x n subtile at k == 16 (m loop is trivial: mr == 1)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {  // partial-m tiles at full n (trivial: mr == 1)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {  // partial-n tiles at full m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_lt_16) {  // k = 1..15 (below 16)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {  // k = 1..15 with A row stride 19
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {  // k = 1..15 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_gt_16) {  // k = 17..31 (one full 16 pass plus remainder)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {  // k = 17..31 with A row stride 37
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {  // k = 17..31 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_div_16) {  // k = 32, 48, ..., 160 (multiples of 16, no remainder)
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_div_16_strided_a) {  // k multiples of 16 with A row stride 163 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {  // k multiples of 16 across every m x n subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)  // NOTE(review): loop variable n is unused; n pinned at nr — appears to match the generator template, confirm upstream
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)  // NOTE(review): loop variable n is unused here too — confirm against generator
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_strided_a) {  // n = 17..31 with A row stride 83 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {  // n = 17..31 across every partial-m tile, 1 iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)  // NOTE(review): loop variable n is unused; n pinned at nr — appears to match the generator template, confirm upstream
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {  // n in {32, 48} with output column stride 19
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_strided_a) {  // n in {32, 48} with A row stride 83 > max k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {  // n in {32, 48} across every partial-m tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {  // every m x n subtile with output row stride 19 > nr
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, qmin) {  // output clamping at a raised lower bound (qmin = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, qmax) {  // output clamping at a lowered upper bound (qmax = 128)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cm) {  // full tile with output row stride 19 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16) {  // baseline: full 2x16 tile, k exactly 16
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cn) {  // output column stride 19 > nr
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {  // A row stride 19 > k
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {  // every m x n subtile at k == 16, 1 iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {  // partial-m tiles at full n
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {  // partial-n tiles at full m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Exercise the partial-tile path where the requested output width exceeds
+  // one register tile (16 < n < 32), across several k values.
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(16): loop variable was unused, so n > 16 was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Same as n_gt_16 but with a column-block stride (cn_stride) larger than nr,
+  // so each 16-wide output block is followed by a 3-element gap.
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(16): loop variable was unused, so n > 16 was never actually tested
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Exercise multi-tile outputs where n is an exact multiple of nr (32, 48).
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(16): loop variable was unused, so multi-tile n was never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Exercise the partial-tile path where the requested output width exceeds
+  // one register tile (16 < n < 32), across several k values.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): loop variable was unused, so n > 16 was never actually tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Same as n_gt_16 but with a column-block stride (cn_stride) larger than nr,
+  // so each 16-wide output block is followed by a 3-element gap.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): loop variable was unused, so n > 16 was never actually tested
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Exercise multi-tile outputs where n is an exact multiple of nr (32, 48).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(16): loop variable was unused, so multi-tile n was never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Baseline check of the 4x16 LD1R microkernel on a full tile with k exactly
+  // equal to the unrolled main-loop depth (16 = 8 * kr * 2 MLAL halves).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // n_gt_16: output widths above the NR tile (n in 17..31) to cover the
+  // n > nr tail-handling path.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n_gt_16_strided_cn: n > NR combined with a non-unit cn_stride; mirrors
+  // n_div_16_strided_cn, which already passes the loop variable through .n(n).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // n_div_16: output widths that are exact multiples of NR (32, 48),
+  // requiring more than one full NR tile.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(16): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // qmin: verifies output clamping against a raised lower activation bound.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // qmax: verifies output clamping against a lowered upper activation bound.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // strided_cm: output rows written with a non-contiguous row stride
+  // (cm_stride = 19 > n = 16) to catch out-of-row writes.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Smoke test at exactly one unrolled K block (8 for MULL variants) for the
+  // 1x8 c2 LD2R microkernel.
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // n_gt_8: output widths above the NR tile (n in 9..15) to cover the
+  // n > nr tail-handling path.
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n_gt_8_strided_cn: n > NR combined with a non-unit cn_stride; mirrors
+  // n_div_8_strided_cn, which already passes the loop variable through .n(n).
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // n_div_8: output widths that are exact multiples of NR (16, 24),
+  // requiring more than one full NR tile.
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable was unused, so every iteration tested the same case
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Smoke test at exactly one unrolled K block (8 for MULL variants) for the
+  // 2x8 c2 LD2R microkernel.
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was hard-coded .n(8): loop variable n was unused, so n > 8 was never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was .n(8); use loop variable, consistent with n_div_8_strided_cn below
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)  // was hard-coded .n(8): n = 16 and 24 (multi-tile output) were never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was hard-coded .n(8): loop variable n was unused, so n > 8 was never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was .n(8); use loop variable, consistent with n_div_8_strided_cn below
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // was hard-coded .n(8): n = 16 and 24 (multi-tile output) were never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was hard-coded .n(8): loop variable n was unused, so n > 8 was never tested
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8); use loop variable, consistent with n_div_8_strided_cn below
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Auto-generated-looking GoogleTest cases for the 2x16 (KR=2) QS8 GEMM microkernel
+  // with rndnu requantization, NEON MULL/LD2R broadcast variant. mr/nr/kr/sr describe
+  // the kernel tile; n_div_16 sweeps output widths that are multiples of NR; the
+  // *_strided_* variants presumably exercise non-contiguous A/C accesses via
+  // a_stride/cn_stride/cm_stride larger than the natural stride (semantics in
+  // GemmMicrokernelTester — TODO confirm); iterations(1) keeps big sweeps fast.
+  // NOTE(review): generated file — fixes belong in the generator template.
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // K-dimension sweep for the 3x16 (KR=2) QS8 rndnu GEMM microkernel, NEON MULL/LD2R
+  // variant. KR*4 = 8 is the kernel's K unroll, hence the k_eq_8 / k_lt_8 / k_gt_8 /
+  // k_div_8 partitions. a_stride values (11/19/83) are just above the respective max k,
+  // presumably to exercise the non-contiguous-A path — TODO confirm in GemmMicrokernelTester.
+  // NOTE(review): generated file — fixes belong in the generator template.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies the 3x16c2 kernel for output widths strictly greater than NR (16).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          // Fix: was .n(16), which left the loop variable n unused so n > 16 was
+          // never exercised; sibling n_gt_16_strided_a/n_gt_16_subtile use .n(n).
+          // NOTE(review): file appears generated — apply the same fix to the template.
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Same n > NR sweep as n_gt_16, with a strided C (cn_stride > n) output layout.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          // Fix: was .n(16) — loop variable n was unused, so the n > 16 cases this
+          // test is named for were never run (compare n_gt_16_strided_a below).
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n > NR sweeps for the 3x16c2 kernel with strided A, and over all m x n subtiles.
+  // These correctly pass the loop variable n to the tester (.n(n)).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies the 3x16c2 kernel for output widths that are multiples of NR (32, 48).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          // Fix: was .n(16) — loop variable n was unused, so the multi-tile widths
+          // this test is named for were never run (compare n_div_16_strided_cn).
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 3x16c2 cases: n divisible by NR with strided C/A and subtiles, strided
+  // output rows (cm_stride), and clamping bounds (qmin/qmax at the midpoint, 128).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // K-dimension sweep for the 4x16 (KR=2) QS8 rndnu GEMM microkernel, NEON MULL/LD2R
+  // variant — same structure as the 3x16 group above it in the file, with MR = 4.
+  // NOTE(review): generated file — fixes belong in the generator template.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies the 4x16c2 kernel for output widths strictly greater than NR (16).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          // Fix: was .n(16), which left the loop variable n unused so n > 16 was
+          // never exercised; sibling n_gt_16_strided_a/n_gt_16_subtile use .n(n).
+          // NOTE(review): file appears generated — apply the same fix to the template.
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Same n > NR sweep as n_gt_16, with a strided C (cn_stride > n) output layout.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          // Fix: was .n(16) — loop variable n was unused, so the n > 16 cases this
+          // test is named for were never run (compare n_gt_16_strided_a below).
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n > NR sweeps for the 4x16c2 kernel with strided A, and over all m x n subtiles.
+  // These correctly pass the loop variable n to the tester (.n(n)).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies the 4x16c2 kernel for output widths that are multiples of NR (32, 48).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          // Fix: was .n(16) — loop variable n was unused, so the multi-tile widths
+          // this test is named for were never run (compare n_div_16_strided_cn).
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 4x16c2 cases: n divisible by NR with strided C/A and subtiles, strided
+  // output rows (cm_stride), and clamping bounds (qmin/qmax at the midpoint, 128).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with non-unit cn_stride (11 > nr=8).
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with an A row stride (19) larger than k.
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; every subtile size m<=1, n<=8, one iteration each.
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep m only, at full n=8.
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep n only, at full m=1.
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15, full tile.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15 with a_stride=19.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15 over every subtile size, one iteration each.
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31, full tile.
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31 with a_stride=37.
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31 over every subtile size, one iteration each.
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 up to 160, full tile.
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 with a_stride=163.
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 over every subtile size, one iteration each.
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15: pass the loop variable through to .n() so each size is
+    // actually exercised; previously .n(8) left `n` unused and retested n=8
+    // (sibling n_gt_8_strided_a/n_gt_8_subtile below already use .n(n)).
+    // NOTE(review): this file is generated — mirror this fix in the generator.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 with non-unit cn_stride: pass the loop variable to .n() so
+    // each size is exercised (was .n(8), leaving `n` unused — compare
+    // n_div_8_strided_cn below, which correctly uses .n(n)).
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 with a_stride=83.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 over all m, one iteration each.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 (multiples of nr): pass the loop variable to .n() so each
+    // size is exercised (was .n(8), leaving `n` unused — compare
+    // n_div_8_strided_cn below, which correctly uses .n(n)).
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 with cn_stride=11.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 with a_stride=83.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 over all m, one iteration each.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // Every subtile size with cm_stride=11, one iteration each.
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with the output clamped from below (qmin=128).
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with the output clamped from above (qmax=128).
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with a non-unit output row stride (cm_stride=11).
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // Baseline: full 2x8 tile at exactly k=16.
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with non-unit cn_stride (11 > nr=8).
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with an A row stride (19) larger than k.
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; every subtile size m<=2, n<=8, one iteration each.
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep m only, at full n=8.
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep n only, at full m=2.
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15, full tile.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15 with a_stride=19.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15 over every subtile size, one iteration each.
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31, full tile.
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31 with a_stride=37.
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 17..31 over every subtile size, one iteration each.
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 up to 160, full tile.
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 with a_stride=163.
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = multiples of 16 over every subtile size, one iteration each.
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15: pass the loop variable through to .n() so each size is
+    // actually exercised; previously .n(8) left `n` unused and retested n=8
+    // (sibling n_gt_8_strided_a/n_gt_8_subtile below already use .n(n)).
+    // NOTE(review): this file is generated — mirror this fix in the generator.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 with non-unit cn_stride: pass the loop variable to .n() so
+    // each size is exercised (was .n(8), leaving `n` unused — compare
+    // n_div_8_strided_cn below, which correctly uses .n(n)).
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 with a_stride=83.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 9..15 over all m, one iteration each.
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 (multiples of nr): pass the loop variable to .n() so each
+    // size is exercised (was .n(8), leaving `n` unused — compare
+    // n_div_8_strided_cn below, which correctly uses .n(n)).
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 with cn_stride=11.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 with a_stride=83.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // n = 16, 24 over all m, one iteration each.
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // Every subtile size with cm_stride=11, one iteration each.
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with the output clamped from below (qmin=128).
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with the output clamped from above (qmax=128).
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with a non-unit output row stride (cm_stride=11).
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // Baseline: full 3x8 tile at exactly k=16.
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with non-unit cn_stride (11 > nr=8).
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // Full tile at k=16 with an A row stride (19) larger than k.
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; every subtile size m<=3, n<=8, one iteration each.
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep m only, at full n=8.
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    // k=16; sweep n only, at full m=3.
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15, full tile.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    // k = 1..15 with a_stride=19.
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fixed: the loop variable n was ignored (.n(8) was hard-coded), so every
+  // iteration of the n = 9..15 loop re-ran the identical n == 8 case and the
+  // n > nr path was never exercised; pass .n(n) as n_gt_8_strided_a does.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Fixed: the loop variable n was ignored (.n(8) was hard-coded), so the
+  // n = 9..15 loop repeated the identical n == 8 case; pass .n(n) for
+  // consistency with n_div_8_strided_cn, which already uses .n(n).
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n > nr coverage for the 3x8c2 kernel with a_stride and with subtile
+  // (m, n) sweeps; both correctly pass the loop variable n to the tester.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fixed: the loop variable n (16, 24) was ignored (.n(8) was hard-coded),
+  // so n divisible by nr was never actually tested; pass .n(n) as the
+  // n_div_8_strided_cn / n_div_8_strided_a variants already do.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 3x8c2 coverage: n divisible by nr with cn_stride / a_stride /
+  // subtile sweeps, qmin/qmax clamping at 128, and cm_stride output layout.
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+// k-size coverage for the 4x8c2 QS8 GEMM (rndnu, NEON MLAL, LD2R) kernel:
+// k == 16 (plus strided_cn / strided_a / subtile), k < 16, k > 16 and
+// k divisible by 16, via GemmMicrokernelTester (mr=4, nr=8, kr=2, sr=1).
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fixed: the loop variable n was ignored (.n(8) was hard-coded), so every
+  // iteration of the n = 9..15 loop re-ran the identical n == 8 case and the
+  // n > nr path was never exercised; pass .n(n) as n_gt_8_strided_a does.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Fixed: the loop variable n was ignored (.n(8) was hard-coded), so the
+  // n = 9..15 loop repeated the identical n == 8 case; pass .n(n) for
+  // consistency with n_div_8_strided_cn, which already uses .n(n).
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n > nr coverage for the 4x8c2 kernel with a_stride and with subtile
+  // (m, n) sweeps; both correctly pass the loop variable n to the tester.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fixed: the loop variable n (16, 24) was ignored (.n(8) was hard-coded),
+  // so n divisible by nr was never actually tested; pass .n(n) as the
+  // n_div_8_strided_cn / n_div_8_strided_a variants already do.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 4x8c2 coverage: n divisible by nr with cn_stride / a_stride /
+  // subtile sweeps, qmin/qmax clamping at 128, and cm_stride output layout.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+// k-size coverage for the 1x16c2 QS8 GEMM (rndnu, NEON MLAL, LD2R) kernel:
+// k == 16 (plus strided_cn / strided_a / subtile), k < 16, k > 16 and
+// k divisible by 16, via GemmMicrokernelTester (mr=1, nr=16, kr=2, sr=1).
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): loop variable `n` is never passed to the tester — `.n(16)`
+    // below pins n to NR, so all 15 iterations exercise the same case. The
+    // sibling n_gt_16_strided_a / n_gt_16_subtile tests do pass `.n(n)`.
+    // Presumably intentional generator output — confirm against
+    // tools/generate-gemm-test.py before changing `.n(16)` to `.n(n)`.
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): `.n(16)` ignores the loop variable here, yet the parallel
+    // n_div_16_strided_cn test passes `.n(n)` — an inconsistency inherited
+    // from the generator template. Verify intent in tools/generate-gemm-test.py.
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): loop variable `n` (32, 48) is unused — `.n(16)` pins n to
+    // NR, so both iterations run the same case; n_div_16_strided_a/_strided_cn
+    // pass `.n(n)`. Matches generator template; confirm before changing.
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): loop variable `n` is unused — `.n(16)` pins n to NR, so
+    // all 15 iterations exercise the same case (same quirk as the 1x16 test).
+    // Presumably intentional generator output; confirm in
+    // tools/generate-gemm-test.py before changing `.n(16)` to `.n(n)`.
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): loop variable `n` (32, 48) is unused — `.n(16)` pins n to
+    // NR; the *_strided_cn / *_strided_a siblings pass `.n(n)`. Matches
+    // generator template; confirm before changing.
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    // NOTE(review): loop variable `n` is unused — `.n(16)` pins n to NR, so
+    // all 15 iterations exercise the same case (same quirk as the 1x16/2x16
+    // tests). Presumably intentional generator output; confirm in
+    // tools/generate-gemm-test.py before changing `.n(16)` to `.n(n)`.
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Tests for the QS8 rndnu GEMM microkernel with MR=4, NR=16, KR=2 ("c2"),
+  // LD2R A-load variant.  Same case matrix as the 3x16 section above:
+  // k == / < / > / % k-block(16), m/n subtiles, strided A/C, qmin/qmax.
+  // NOTE(review): appears machine-generated from a test template -- if so,
+  // edit the generator, not this file.  TODO confirm.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // qmin(128)/qmax(128) exercise the output clamping path.
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD4R, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-gemm-minmax-rndnu.yaml b/test/qs8-gemm-minmax-rndnu.yaml
index e090269..e780f4a 100644
--- a/test/qs8-gemm-minmax-rndnu.yaml
+++ b/test/qs8-gemm-minmax-rndnu.yaml
@@ -3,6 +3,102 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
 - name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r
   init: xnn_init_qs8_conv_minmax_rndnu_neon_params
   k-block: 8
diff --git a/test/qs8-igemm-minmax-fp32.cc b/test/qs8-igemm-minmax-fp32.cc
index 2a21c23..3aa38ce 100644
--- a/test/qs8-igemm-minmax-fp32.cc
+++ b/test/qs8-igemm-minmax-fp32.cc
@@ -23,6 +23,3750 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Tests for the QS8 fp32-requantized IGEMM microkernel with MR=1, NR=8,
+  // KR=2 ("c2"), LD1R A-load variant (per the commit message: 4 LD1R loads
+  // replace 1 LD1 + 4 DUP).  IGEMM reads A through an indirection buffer,
+  // so no a_stride cases appear here (unlike the GEMM sections above).
+  // NOTE(review): appears machine-generated from a test template -- if so,
+  // edit the generator, not this file.  TODO confirm.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // n_gt_8_small_kernel: .ks(3) case over the k sweep.
+  // NOTE(review): the loop variable n (9..15) is unused -- .n(8) is fixed, so
+  // the outer loop repeats an identical configuration.  This matches the
+  // pattern elsewhere in the generated suite; confirm against the test
+  // generator before changing.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // n_div_8_small_kernel: .ks(3) case over the k sweep.
+  // NOTE(review): the loop variable n (16, 24) is unused -- .n(8) is fixed, so
+  // the loop repeats an identical configuration; this mirrors the generated
+  // suite -- confirm against the test generator before changing.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // strided_cm_subtile: every subtile shape (m = 1, n = 1..8) over the k sweep
+  // with an output row stride cm_stride = 11 (larger than any tested n).
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // a_offset: combines .ks(3) with a nonzero input offset .a_offset(83)
+  // over the k sweep (k = 1, 18, 35, 52, 69).
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  // zero: same configuration as a_offset, additionally sweeping
+  // .zero_index(mz) for every row (here only mz = 0, since mr = 1) --
+  // presumably selects which A pointer is redirected to the zero buffer;
+  // confirm in GemmMicrokernelTester.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  // qmin: single full-tile run (m=1, n=8, k=16) with .qmin(128) set, i.e. a
+  // non-default lower output bound (per the tester's naming).
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // qmax: single full-tile run (m=1, n=8, k=16) with .qmax(128) set, i.e. a
+  // non-default upper output bound (per the tester's naming).
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  // strided_cm: single full-tile run (m=1, n=8, k=16) with an output row
+  // stride .cm_stride(11) larger than n.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Test suite for the QS8 IGEMM minmax (fp32 requantization) 2x8c2 NEON MLAL
+  // LD2R microkernel.  Every case configures GemmMicrokernelTester with the
+  // kernel's tile parameters (mr=2, nr=8, kr=2, sr=1) and varies m/n/k,
+  // kernel size (ks), output strides, input offset, and qmin/qmax bounds.
+  // k=16 is the reference k used by the single-shot cases.
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): in n_gt_8, n_gt_8_strided_cn, n_div_8, n_gt_8_small_kernel
+  // and n_div_8_small_kernel below, the loop variable n is unused (.n(8) is
+  // fixed), so those loops repeat an identical configuration.  This mirrors
+  // the rest of the generated suite -- confirm against the test generator
+  // before changing.
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Test suite for the QS8 IGEMM minmax (fp32 requantization) 1x8c2 NEONv8
+  // MLAL LD2R microkernel.  Same case structure as the NEON suites above, but
+  // gated on TEST_REQUIRES_ARM_NEON_V8 and using the neonv8 params initializer.
+  // Tile parameters: mr=1, nr=8, kr=2, sr=1; k=16 is the reference k.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): in n_gt_8, n_gt_8_strided_cn, n_div_8, n_gt_8_small_kernel
+  // and n_div_8_small_kernel below, the loop variable n is unused (.n(8) is
+  // fixed), so those loops repeat an identical configuration.  This mirrors
+  // the rest of the generated suite -- confirm against the test generator
+  // before changing.
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C2__NEONV8_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_IGEMM_MINMAX_FP32_1X8C2__NEON_MLAL_LD4R, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-igemm-minmax-fp32.yaml b/test/qs8-igemm-minmax-fp32.yaml
index 174e5a0..7051d5d 100644
--- a/test/qs8-igemm-minmax-fp32.yaml
+++ b/test/qs8-igemm-minmax-fp32.yaml
@@ -3,6 +3,30 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c2__neonv8_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
 - name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r
   init: xnn_init_qs8_conv_minmax_fp32_neon_params
   k-block: 16
diff --git a/test/qs8-igemm-minmax-rndnu.cc b/test/qs8-igemm-minmax-rndnu.cc
index f3a5a44..e5764b0 100644
--- a/test/qs8-igemm-minmax-rndnu.cc
+++ b/test/qs8-igemm-minmax-rndnu.cc
@@ -23,6 +23,14982 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): the tests below look auto-generated (XNNPACK GEMM microkernel test
+  // generator) — confirm before hand-editing. Each drives GemmMicrokernelTester against
+  // the 1x8 c2 (mr=1, nr=8, kr=2) QS8 IGEMM NEON MULL LD1R kernel with rndnu requantization.
+  // NOTE(review): n_div_8_small_kernel passes .n(8) rather than the loop's n — consistent
+  // with sibling generated tests; verify generator intent.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Exercises every (m, n) subtile with a strided output row pitch (cm_stride).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Verifies the indirect-GEMM a_offset (input pointer bias) path.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  // Verifies the zero-pointer (padding row) handling for each zero_index position.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Checks output clamping at the lower bound (qmin).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // Checks output clamping at the upper bound (qmax).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // Full tile with a strided output row pitch (cm_stride).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // NOTE(review): auto-generated-looking tests for the 2x8 c2 (mr=2, nr=8, kr=2) QS8 IGEMM
+  // NEON MULL LD1R kernel; confirm the generator before hand-editing. Coverage: full tile,
+  // k below/at/above/multiple-of the 8-element k-block, n remainder handling, subtiles,
+  // strided outputs, small-kernel (ks=3) indirection, a_offset/zero padding, and qmin/qmax clamping.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): n_gt_8/n_div_8 (and their strided_cn/small_kernel variants below) pass
+  // .n(8) rather than the loop's n, while n_div_8_strided_cn passes .n(n) — this matches
+  // sibling generated tests in the tree; verify generator intent rather than hand-editing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // NOTE(review): auto-generated-looking tests for the 3x8 c2 (mr=3, nr=8, kr=2) QS8 IGEMM
+  // NEON MULL LD1R kernel; same coverage pattern as the 2x8 section (k edges, n remainder,
+  // subtiles, strided outputs, ks=3 small-kernel indirection, a_offset/zero, qmin/qmax).
+  // Confirm the generator before hand-editing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): as in the 2x8 section, the n_gt_8/n_div_8 variants below pass .n(8)
+  // (not the loop's n) except n_div_8_strided_cn which passes .n(n); consistent with the
+  // sibling generated tests — verify against the generator rather than hand-editing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // NOTE(review): start of the auto-generated-looking 4x8 c2 (mr=4, nr=8, kr=2) test group
+  // for the QS8 IGEMM NEON MULL LD1R kernel; confirm the generator before hand-editing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Auto-generated test battery for xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r
+  // (LD1R broadcast variant). Every case fixes the microkernel tile to mr=1, nr=16,
+  // kr=2, sr=1 and varies m/n/k, output strides (cn/cm), indirection kernel size (ks),
+  // input offset (a_offset) and output clamping (qmin/qmax).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): in n_gt_16 (and n_gt_16_strided_cn / n_div_16 / *_small_kernel below)
+  // the loop variable n is declared but .n(16) is passed, so every iteration runs the
+  // same shape; only the *_subtile variants and n_div_16_strided_cn pass .n(n). Matches
+  // the repeated generated pattern here — confirm against the test generator.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  // zero_index sweeps every row index 0..mr-1 (mr=1 here, so a single iteration) —
+  // presumably exercising the IGEMM zero-pointer path; confirm in GemmMicrokernelTester.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  // --- QS8 IGEMM 4x16c2 NEON MULL LD1R: k-dimension coverage ---
+  // k_lt_8 / k_gt_8 / k_div_8 exercise the reduction dimension below, above,
+  // and at multiples of the kernel's k unroll (8); the *_subtile variants
+  // re-run every (m, n) subtile of the 4x16 register tile, one iteration each.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // --- n-dimension coverage (n > nr and n divisible by nr) ---
+  // Fix: the n loops below previously passed the hard-coded tile width
+  // (.n(16)) instead of the loop variable, so every iteration tested the
+  // identical n == 16 case and n > 16 was never exercised. Pass .n(n), as
+  // the *_subtile and n_div_16_strided_cn variants already do.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n_div_16 strided/subtile variants (these correctly forward the loop
+  // variable via .n(n)) plus small_kernel tests, which set ks(3) to run the
+  // indirect-GEMM path over a 3-element kernel/pointer window.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fix: forward the loop variable via .n(n) (was hard-coded .n(16), which
+  // repeated the n == 16 case and never covered n > 16 on the small-kernel
+  // path).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 4x16c2 MULL LD1R cases: strided output rows (cm_stride),
+  // indirect-input offsetting (a_offset), the zero-pointer row substitution
+  // used for padding, and output clamping via qmin/qmax.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // --- QS8 IGEMM 1x8c2 NEON MLAL LD1R: k-dimension coverage ---
+  // The MLAL variant unrolls k by 16, so k_eq/k_lt/k_gt/k_div pivot on 16.
+  // *_subtile variants cover all (m, n) subtiles of the 1x8 tile.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // --- n-dimension coverage (n > nr and n divisible by nr) ---
+  // Fix: forward the loop variable via .n(n) (was hard-coded .n(8), which
+  // repeated the n == 8 case and never exercised n > 8). The *_subtile and
+  // n_div_8_strided_cn variants already pass .n(n).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // n_div_8 strided/subtile variants (these correctly forward .n(n)) plus
+  // small_kernel tests, which set ks(3) for the indirect-GEMM pointer window.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // Fix: forward the loop variable via .n(n) (was hard-coded .n(8), which
+  // repeated the n == 8 case and never covered n > 8 on the small-kernel
+  // path).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Remaining 1x8c2 MLAL LD1R cases: strided output rows (cm_stride),
+  // indirect-input offsetting (a_offset), zero-pointer row substitution,
+  // and output clamping via qmin/qmax.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // --- QS8 IGEMM 2x8c2 NEON MLAL LD1R: k-dimension coverage ---
+  // Same pattern as the 1x8c2 variant with mr == 2; k cases pivot on the
+  // MLAL k unroll of 16, and *_subtile variants sweep all (m, n) subtiles.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // --- n-dimension coverage (n > nr and n divisible by nr) ---
+  // Fix: forward the loop variable via .n(n) (was hard-coded .n(8), which
+  // repeated the n == 8 case and never exercised n > 8). The *_subtile
+  // variant already passes .n(n).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16) {  // k exactly equal to the kernel's 16-deep unroll
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cn) {  // non-default output-column stride, full tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {  // every (m, n) subtile at k=16, one iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {  // vary row count m only
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {  // vary column count n only
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_lt_16) {  // remainder-only depths below the unroll
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {  // sub-unroll depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_gt_16) {  // one full unroll pass plus a remainder
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {  // unroll-plus-remainder depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_div_16) {  // k: exact multiples of the 16-deep unroll
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {  // unroll-multiple depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8) {  // n above the nr=8 tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester; presumably generator-intended -- confirm
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {  // as n_gt_8, with non-default cn stride
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)  // NOTE(review): loop variable n unused here, unlike n_div_8_strided_cn below -- confirm
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {  // n > 8 with all row counts m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8) {  // n an exact multiple of the nr=8 tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester -- confirm generator intent
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {  // multiple-of-8 n with non-default cn stride
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {  // multiple-of-8 n across all row counts m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, small_kernel) {  // ks=3: multi-tap indirection (IGEMM) path
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {  // ks=3 across every (m, n) subtile
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {  // ks=3 combined with n > 8
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester -- confirm generator intent
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {  // ks=3 combined with multiple-of-8 n
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester -- confirm generator intent
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {  // non-default output-row stride across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, a_offset) {  // indirection buffer with nonzero input offset
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, zero) {  // zero-pointer substitution for each candidate row index mz
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, qmin) {  // lower activation clamp exercised
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, qmax) {  // upper activation clamp exercised
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD1R, strided_cm) {  // non-default output-row stride, full tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16) {  // k exactly equal to the kernel's 16-deep unroll
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cn) {  // non-default output-column stride, full tile
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile) {  // every (m, n) subtile at k=16, one iteration each
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {  // vary row count m only
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {  // vary column count n only
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_lt_16) {  // remainder-only depths below the unroll
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_lt_16_subtile) {  // sub-unroll depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_gt_16) {  // one full unroll pass plus a remainder
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_gt_16_subtile) {  // unroll-plus-remainder depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_div_16) {  // k: exact multiples of the 16-deep unroll
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, k_div_16_subtile) {  // unroll-multiple depths across all subtiles
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8) {  // n above the nr=8 tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester; presumably generator-intended -- confirm
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_strided_cn) {  // as n_gt_8, with non-default cn stride
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n unused here, unlike n_div_8_strided_cn below -- confirm
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_subtile) {  // n > 8 with all row counts m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8) {  // n an exact multiple of the nr=8 tile
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)  // NOTE(review): loop variable n is not passed to the tester -- confirm generator intent
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_strided_cn) {  // multiple-of-8 n with non-default cn stride
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_subtile) {  // multiple-of-8 n across all row counts m
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, small_kernel) {  // ks=3: multi-tap indirection (IGEMM) path
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD1R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // --- Tail of the generated test battery for the QS8 IGEMM rndnu 2x8c2
+  // --- NEON MULL LD2R microkernel (mr=2, nr=8, kr=2, sr=1); the k/n-sweep
+  // --- cases precede this hunk.  NOTE(review): this file appears to be
+  // --- generator output (see gen/ paths in the BUILD changes) -- prefer
+  // --- regenerating over hand-editing.
+
+  // Indirection (ks=3) with the A pointers offset by a_offset bytes from the
+  // zero-point buffer.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  // Indirection with the zero-pointer substituted at each row index mz in
+  // turn (skip-row path of the IGEMM).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // Output clamping at the lower bound (qmin raised to 128).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // Output clamping at the upper bound (qmax lowered to 128).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  // Non-contiguous output rows (cm_stride larger than n).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Generated test battery for the QS8 IGEMM rndnu 3x8c2 NEON MULL LD2R
+  // microkernel (mr=3, nr=8, kr=2, sr=1).  Covers: k around the unrolled
+  // block of 8 (k_eq/lt/gt/div_8), partial output tiles (*_subtile variants,
+  // 1 iteration per m/n combination), n beyond nr, strided outputs
+  // (cn_stride/cm_stride), indirection paths (small_kernel/a_offset/zero),
+  // and output clamping (qmin/qmax).
+  // NOTE(review): machine-generated file -- fix issues in the generator
+  // template and regenerate rather than editing by hand.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n is unused below (.n(8) is hard-coded);
+  // presumably intended as .n(n), as in n_div_8_strided_cn -- confirm
+  // against the test generator before changing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here as well (.n(8)) -- see note on
+  // n_gt_8 above; n_div_8_strided_cn passes .n(n).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Generated test battery for the QS8 IGEMM rndnu 4x8c2 NEON MULL LD2R
+  // microkernel (mr=4, nr=8, kr=2, sr=1).  Same case matrix as the 3x8c2
+  // section above: k sweeps around the block of 8, partial tiles, n > nr,
+  // strided outputs, indirection (small_kernel/a_offset/zero) and qmin/qmax
+  // clamping.  NOTE(review): machine-generated file -- fix issues in the
+  // generator template and regenerate rather than editing by hand.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n is unused below (.n(8) is hard-coded);
+  // presumably intended as .n(n), as in n_div_8_strided_cn -- confirm
+  // against the test generator before changing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here as well (.n(8)) -- see note on
+  // n_gt_8 above; n_div_8_strided_cn passes .n(n).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): loop variable n unused here (.n(8)) -- see note on n_gt_8.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // --- Head of the generated test battery for the QS8 IGEMM rndnu 1x16c2
+  // --- NEON MULL LD2R microkernel (mr=1, nr=16, kr=2, sr=1); the remaining
+  // --- cases continue past this hunk.  NOTE(review): the subtile_m loop
+  // --- below degenerates to a single m=1 case because mr=1 -- expected for
+  // --- generator output parameterized on MR.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Generated test battery for the 2x16 (mr=2, nr=16, kr=2, sr=1) QS8 rndnu
+  // IGEMM microkernel, LD2R variant -- same case matrix as the 1x16 battery
+  // above: k sweeps around the tested k-block of 8, partial tiles, output
+  // strides, multi-pass indirection (ks(3)), a_offset / zero_index, and
+  // qmin / qmax clamping.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): here (and in n_gt_16_strided_cn, n_div_16,
+  // n_gt_16_small_kernel and n_div_16_small_kernel below) the loop variable n
+  // is never used in the loop body -- .n(16) is fixed -- so every n iteration
+  // repeats the identical configuration with only k varying.  Confirm against
+  // the test generator whether .n(n) was intended.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  // NOTE(review): unlike the sibling n_div_16 cases, this one does pass .n(n)
+  // with n up to 48 (> nr = 16) to a single tester invocation -- confirm the
+  // tester is specified to handle n greater than nr here.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // Generated test battery for the 3x16 (mr=3, nr=16, kr=2, sr=1) QS8 rndnu
+  // IGEMM microkernel, LD2R variant -- same case matrix as the 1x16 and 2x16
+  // batteries above.  (The section continues past this hunk.)
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // --- Tests for xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r ---
+  // NOTE(review): this section follows the same generated template as the
+  // 3x16c2 section; prefer regenerating over hand-editing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): as in the 3x16c2 section, n_gt_16 / n_gt_16_strided_cn /
+  // n_div_16 and the *_small_kernel variants below pass fixed .n(16) while
+  // looping n, whereas n_div_16_strided_cn passes .n(n). Mirrors the generator
+  // template -- confirm intent before changing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): a_offset(163) appears sized to exceed mr*k for the largest k
+  // tested (4*40 = 160) -- confirm against GemmMicrokernelTester requirements.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MULL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  // --- Tests for xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r ---
+  // The MLAL variant accumulates two k-blocks per pass, so the k block size
+  // here is 16 (vs. 8 for the MULL variants above).
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  // NOTE(review): n_gt_8 and n_gt_8_strided_cn loop n but pass fixed .n(8),
+  // consistent with the n_gt_16 tests in the sections above; mirrors the
+  // generator template -- confirm intent before changing.
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64  // NEON kernel: ARM/ARM64 builds only
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16) {  // k exactly one 16-wide MLAL unroll: main-loop-only path, 2-row variant
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cn) {  // cn_stride(11): non-default column stride for the output
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {  // all m<=2, n<=8 partial tiles at the unrolled k
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_lt_16) {  // k below one unroll: remainder-only path
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_gt_16) {  // one full unroll plus a partial tail
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_div_16) {  // multiples of the unroll: no remainder code
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, small_kernel) {  // ks(3): indirect multi-tap convolution path
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, a_offset) {  // a_offset(163): indirection base offset for the 2-row variant
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, zero) {  // every row index tried as the zero-buffer row
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {  // auto-generated: n a multiple of nr(8), strided output
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)  // NOTE(review): here the loop's n IS forwarded, unlike n_gt_8_strided_cn / plain n_div_8 which pass .n(8) — inconsistent template; confirm intended behavior in the test generator
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, zero) {  // auto-generated: igemm zero-pointer handling
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {  // presumably lets each of the mr(3) rows take a turn as the zero-pointer row — confirm zero_index semantics in GemmMicrokernelTester
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(251)  // 251 > max m*k tested here (3*80 = 240); presumably places the offset past real A data — TODO confirm against the tester
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16) {  // auto-generated: full 4x8 tile with k exactly 16 (the kernel's k block, per the k_eq_16 template name)
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, a_offset) {  // auto-generated: indirect-GEMM with a nonzero offset applied to the A pointers
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(331)  // 331 > max m*k tested here (4*80 = 320); presumably keeps the offset beyond real A data (cf. 251 for the 3-row variant) — TODO confirm in GemmMicrokernelTester
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16) {  // auto-generated: n a multiple of nr(16)
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)  // NOTE(review): loop variable n unused (same template quirk as the n_gt_*/n_div_* tests above; n_div_16_strided_cn forwards .n(n)) — confirm against the test generator
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(2)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(2)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(2)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C2__NEON_MLAL_LD2R, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(2)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2__NEON_MULL_LD4R, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-igemm-minmax-rndnu.yaml b/test/qs8-igemm-minmax-rndnu.yaml
index 1ef35de..fce04fc 100644
--- a/test/qs8-igemm-minmax-rndnu.yaml
+++ b/test/qs8-igemm-minmax-rndnu.yaml
@@ -3,6 +3,102 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld1r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mull_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_ld2r
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
 - name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_ld4r
   init: xnn_init_qs8_conv_minmax_rndnu_neon_params
   k-block: 8