QS8/QC8 C4S2 Neon GEMM/IGEMM microkernels

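Add c4s2 (KR=4, SR=2; log2_kr=2, log2_sr=1 in the benchmark registration) GEMM
and IGEMM microkernels for QS8 and QC8, in MULL and MLAL flavors with RNDNU and
FP32 requantization, for NEON and (FP32-only) NEONv8. The generated sources are
registered in BUILD.bazel and CMakeLists.txt, and the 2x8/2x16/3x8/3x16/4x8/4x16
tiles are wired into the qs8-gemm end-to-end benchmark, using the matching
1x8/1x16 kernels as the MR=1 fallbacks. Also drops the blank lines between
benchmark definitions, removes the XNN_ENABLE_FULL_BENCHMARKS-only
1x8c16/1x16c16 gemmlowp benchmarks, and moves vcvt-avx-x24.c to its sorted
position in PROD_AVX_MICROKERNEL_SRCS.

For orientation, a rough, hypothetical sketch of the MULL + PADAL accumulation
pattern these kernels build on (illustration only, not the generated code; the
real c4s2 kernels interleave loads, use wider tiles, and handle remainder
channels):

  #include <arm_neon.h>

  // One 8-element k-block: widen int8 products to int16 with VMULL, then
  // pairwise-add-accumulate into int32 lanes with VPADAL. The 4-byte VEXT
  // stands in for the "s2" shuffle of the activations between the two
  // passes, so each int32 lane ends up summing 4 input channels.
  static int32x4_t accumulate_c4s2_block(int8x8_t va, const int8_t* w, int32x4_t vacc) {
    const int8x8_t vb0 = vld1_s8(w);            // first packed weight group
    vacc = vpadalq_s16(vacc, vmull_s8(va, vb0));
    va = vext_s8(va, va, 4);                    // rotate activations by 4 bytes
    const int8x8_t vb1 = vld1_s8(w + 8);        // second packed weight group
    vacc = vpadalq_s16(vacc, vmull_s8(va, vb1));
    return vacc;
  }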
PiperOrigin-RevId: 409348021
diff --git a/BUILD.bazel b/BUILD.bazel
index f8e0a57..1732ae8 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -2481,21 +2481,25 @@
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c",
+    "src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/1x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c",
+    "src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-gemm/gen/4x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c",
+    "src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/1x16-minmax-fp32-neon-mlal-lane.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c",
+    "src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qc8-igemm/gen/4x16-minmax-fp32-neon-mlal-lane.c",
     "src/qs8-dwconv/gen/up8x9-minmax-fp32-neon-mul16.c",
@@ -2555,6 +2559,9 @@
     "src/qs8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c",
@@ -2571,6 +2578,8 @@
     "src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/1x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2586,6 +2595,9 @@
     "src/qs8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mull-padal.c",
@@ -2599,6 +2611,8 @@
     "src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/2x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2610,6 +2624,8 @@
     "src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x8c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2621,6 +2637,8 @@
     "src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/3x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2632,6 +2650,8 @@
     "src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x8c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2646,6 +2666,8 @@
     "src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-gemm/gen/4x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2664,6 +2686,9 @@
     "src/qs8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qs8-igemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c",
@@ -2680,6 +2705,8 @@
     "src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/1x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/1x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2695,6 +2722,9 @@
     "src/qs8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c",
     "src/qs8-igemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/2x8c8-minmax-gemmlowp-neon-mull-padal.c",
@@ -2708,6 +2738,8 @@
     "src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/2x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/2x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2719,6 +2751,8 @@
     "src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/3x8c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x8c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2730,6 +2764,8 @@
     "src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/3x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2741,6 +2777,8 @@
     "src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/4x8c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x8c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -2755,6 +2793,8 @@
     "src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c",
     "src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c",
     "src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-neon-mull-padal.c",
     "src/qs8-igemm/gen/4x16c16-minmax-gemmlowp-neon-mlal-padal.c",
@@ -3317,21 +3357,25 @@
     "src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qc8-igemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-dwconv/gen/up8x9-minmax-fp32-neonv8-mul16.c",
@@ -3345,21 +3389,25 @@
     "src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c",
     "src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c",
     "src/qs8-igemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c",
     "src/qs8-vmul/gen/minmax-fp32-neonv8-ld64-x8.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 869892c..d9802cd 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1510,21 +1510,25 @@
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neon-mlal-padal-dup.c
   src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c
+  src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/1x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neon-mlal-padal-dup.c
   src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c
+  src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c
   src/qc8-gemm/gen/4x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neon-mlal-padal-dup.c
   src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c
+  src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/1x16-minmax-fp32-neon-mlal-lane.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neon-mlal-padal-dup.c
   src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c
+  src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c
   src/qc8-igemm/gen/4x16-minmax-fp32-neon-mlal-lane.c
   src/qs8-dwconv/gen/up8x9-minmax-fp32-neon-mul16.c
@@ -1584,6 +1588,9 @@
   src/qs8-gemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
+  src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c
   src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c
@@ -1600,6 +1607,8 @@
   src/qs8-gemm/gen/1x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/1x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1615,6 +1624,9 @@
   src/qs8-gemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
+  src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c
   src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mull-padal.c
@@ -1628,6 +1640,8 @@
   src/qs8-gemm/gen/2x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/2x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1639,6 +1653,8 @@
   src/qs8-gemm/gen/3x8c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/3x8c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1650,6 +1666,8 @@
   src/qs8-gemm/gen/3x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/3x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1661,6 +1679,8 @@
   src/qs8-gemm/gen/4x8c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/4x8c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1675,6 +1695,8 @@
   src/qs8-gemm/gen/4x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-gemm/gen/4x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1693,6 +1715,9 @@
   src/qs8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
+  src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c
   src/qs8-igemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c
@@ -1709,6 +1734,8 @@
   src/qs8-igemm/gen/1x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/1x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/1x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1724,6 +1751,9 @@
   src/qs8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c
   src/qs8-igemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/2x8c8-minmax-gemmlowp-neon-mull-padal.c
@@ -1737,6 +1767,8 @@
   src/qs8-igemm/gen/2x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/2x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/2x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1748,6 +1780,8 @@
   src/qs8-igemm/gen/3x8c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/3x8c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/3x8c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1759,6 +1793,8 @@
   src/qs8-igemm/gen/3x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/3x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1770,6 +1806,8 @@
   src/qs8-igemm/gen/4x8c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/4x8c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/4x8c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -1784,6 +1822,8 @@
   src/qs8-igemm/gen/4x16c2s4-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c
   src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
+  src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
   src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c
   src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-neon-mull-padal.c
   src/qs8-igemm/gen/4x16c16-minmax-gemmlowp-neon-mlal-padal.c
@@ -2338,21 +2378,25 @@
   src/qc8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qc8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qc8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qc8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qc8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qc8-igemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-dwconv/gen/up8x9-minmax-fp32-neonv8-mul16.c
@@ -2366,21 +2410,25 @@
   src/qs8-gemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qs8-gemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-gemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qs8-gemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-gemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-igemm/gen/1x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qs8-igemm/gen/1x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/1x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-igemm/gen/2x8c2-minmax-fp32-neonv8-mlal-padal-dup.c
   src/qs8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c
+  src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c
   src/qs8-igemm/gen/4x16-minmax-fp32-neonv8-mlal-lane.c
   src/qs8-vmul/gen/minmax-fp32-neonv8-ld64-x8.c
@@ -3543,11 +3591,11 @@
 
 SET(PROD_AVX_MICROKERNEL_SRCS
   src/f16-f32-vcvt/gen/vcvt-avx-int16-x16.c
-  src/f32-f16-vcvt/gen/vcvt-avx-x24.c
   src/f32-dwconv/gen/up8x25-minmax-avx.c
   src/f32-dwconv/gen/up16x3-minmax-avx.c
   src/f32-dwconv/gen/up16x4-minmax-avx.c
   src/f32-dwconv/gen/up16x9-minmax-avx.c
+  src/f32-f16-vcvt/gen/vcvt-avx-x24.c
   src/f32-gemm/gen/1x16-minmax-avx-broadcast.c
   src/f32-gemm/gen/5x16-minmax-avx-broadcast.c
   src/f32-igemm/gen/1x16-minmax-avx-broadcast.c
diff --git a/bench/qs8-gemm-e2e.cc b/bench/qs8-gemm-e2e.cc
index 70db3d5..9f9cab1 100644
--- a/bench/qs8-gemm-e2e.cc
+++ b/bench/qs8-gemm-e2e.cc
@@ -200,7 +200,6 @@
       2 /* mr */, 8  /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x16__neon_mlal_lane,
@@ -211,7 +210,6 @@
       2 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x8__neon_mlal_lane,
@@ -222,7 +220,6 @@
       3 /* mr */, 8  /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x16__neon_mlal_lane,
@@ -233,7 +230,6 @@
       3 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x8__neon_mlal_lane,
@@ -244,7 +240,6 @@
       4 /* mr */, 8  /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x16__neon_mlal_lane,
@@ -255,7 +250,6 @@
       4 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_6x8_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_6x8__neon_mlal_lane,
@@ -266,7 +260,6 @@
       6 /* mr */, 8  /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_6x16_gemmlowp__neon_mlal_lane(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_6x16__neon_mlal_lane,
@@ -277,7 +270,6 @@
       6 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mlal_padal_dup,
@@ -288,7 +280,6 @@
       2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mlal_padal_dup,
@@ -299,7 +290,6 @@
       2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_padal_dup,
@@ -310,7 +300,6 @@
       3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mlal_padal_dup,
@@ -321,7 +310,6 @@
       3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mlal_padal_dup,
@@ -332,7 +320,6 @@
       4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c2__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mlal_padal_dup,
@@ -343,7 +330,6 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2s4__neon_mlal_padal,
@@ -354,7 +340,6 @@
       2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2s4__neon_mlal_padal,
@@ -365,7 +350,6 @@
       2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2s4__neon_mlal_padal,
@@ -376,7 +360,6 @@
       3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2s4__neon_mlal_padal,
@@ -387,7 +370,6 @@
       3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2s4__neon_mlal_padal,
@@ -398,7 +380,6 @@
       4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c2s4__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2s4__neon_mlal_padal,
@@ -409,7 +390,6 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4__neon_mlal_padal_dup,
@@ -420,7 +400,6 @@
       2 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mlal_padal_dup,
@@ -431,7 +410,6 @@
       2 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mlal_padal_dup,
@@ -442,7 +420,6 @@
       3 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mlal_padal_dup,
@@ -453,7 +430,6 @@
       3 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mlal_padal_dup,
@@ -464,7 +440,6 @@
       4 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c4__neon_mlal_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mlal_padal_dup,
@@ -475,7 +450,66 @@
       4 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
+  static void qs8_gemm_2x8c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c4s2__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_2x8c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_padal_dup,
@@ -486,7 +520,6 @@
       2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2__neon_mull_padal_dup,
@@ -497,7 +530,6 @@
       2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_padal_dup,
@@ -508,7 +540,6 @@
       3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2__neon_mull_padal_dup,
@@ -519,7 +550,6 @@
       3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2__neon_mull_padal_dup,
@@ -530,7 +560,6 @@
       4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c2__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2__neon_mull_padal_dup,
@@ -541,7 +570,6 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2s4__neon_mull_padal,
@@ -552,7 +580,6 @@
       2 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c2s4__neon_mull_padal,
@@ -563,7 +590,6 @@
       2 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2s4__neon_mull_padal,
@@ -574,7 +600,6 @@
       3 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2s4__neon_mull_padal,
@@ -585,7 +610,6 @@
       3 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c2s4__neon_mull_padal,
@@ -596,7 +620,6 @@
       4 /* mr */, 8  /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c2s4__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c2s4__neon_mull_padal,
@@ -607,7 +630,6 @@
       4 /* mr */, 16 /* nr */, 1 /* log2_kr */, 2 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4__neon_mull_padal_dup,
@@ -618,7 +640,6 @@
       2 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4__neon_mull_padal_dup,
@@ -629,7 +650,6 @@
       2 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4__neon_mull_padal_dup,
@@ -640,7 +660,6 @@
       3 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mull_padal_dup,
@@ -651,7 +670,6 @@
       3 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neon_mull_padal_dup,
@@ -662,7 +680,6 @@
       4 /* mr */, 8  /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c4__neon_mull_padal_dup(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mull_padal_dup,
@@ -673,7 +690,66 @@
       4 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
+  static void qs8_gemm_2x8c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x16c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      2 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x8c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_3x16c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      3 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x8c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 8  /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_4x16c4s2__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal,
+      xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal,
+      xnn_init_qs8_conv_minmax_rndnu_neon_params,
+      4 /* mr */, 16 /* nr */, 2 /* log2_kr */, 1 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_4x8c4__neondot(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4__neondot,
@@ -734,7 +810,6 @@
       8 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEONDOT);
   }
-
   static void qs8_gemm_2x8c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x8c8__neon_mull_padal,
@@ -745,7 +820,6 @@
       2 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x16c8__neon_mull_padal,
@@ -756,7 +830,6 @@
       2 /* mr */, 16 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x8c8__neon_mull_padal,
@@ -767,7 +840,6 @@
       3 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x16c8__neon_mull_padal,
@@ -778,7 +850,6 @@
       3 /* mr */, 16 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x8c8__neon_mull_padal,
@@ -789,7 +860,6 @@
       4 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c8_gemmlowp__neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x16c8__neon_mull_padal,
@@ -800,31 +870,6 @@
       4 /* mr */, 16 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
-#if XNN_ENABLE_FULL_BENCHMARKS
-  static void qs8_gemm_1x8c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
-    GEMMEnd2EndBenchmark(state, model,
-      xnn_qs8_gemm_minmax_gemmlowp_ukernel_1x8c16__neon_mlal_padal,
-      xnn_qs8_igemm_minmax_gemmlowp_ukernel_1x8c16__neon_mlal_padal,
-      xnn_qs8_gemm_minmax_gemmlowp_ukernel_1x8c16__neon_mlal_padal,
-      xnn_qs8_igemm_minmax_gemmlowp_ukernel_1x8c16__neon_mlal_padal,
-      xnn_init_qs8_conv_minmax_gemmlowp_neon_params,
-      1 /* mr */, 8  /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
-      benchmark::utils::CheckNEON);
-  }
-
-  static void qs8_gemm_1x16c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
-    GEMMEnd2EndBenchmark(state, model,
-      xnn_qs8_gemm_minmax_gemmlowp_ukernel_1x16c16__neon_mlal_padal,
-      xnn_qs8_igemm_minmax_gemmlowp_ukernel_1x16c16__neon_mlal_padal,
-      xnn_qs8_gemm_minmax_gemmlowp_ukernel_1x16c16__neon_mlal_padal,
-      xnn_qs8_igemm_minmax_gemmlowp_ukernel_1x16c16__neon_mlal_padal,
-      xnn_init_qs8_conv_minmax_gemmlowp_neon_params,
-      1 /* mr */, 16 /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
-      benchmark::utils::CheckNEON);
-  }
-#endif  // XNN_ENABLE_FULL_BENCHMARKS
-
   static void qs8_gemm_2x8c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x8c16__neon_mlal_padal,
@@ -835,7 +880,6 @@
       2 /* mr */, 8  /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x16c16__neon_mlal_padal,
@@ -846,7 +890,6 @@
       2 /* mr */, 16 /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x8c16__neon_mlal_padal,
@@ -857,7 +900,6 @@
       4 /* mr */, 8  /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x16c16__neon_mlal_padal,
@@ -868,7 +910,6 @@
       4 /* mr */, 16 /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x8c16__neon_mlal_padal,
@@ -879,7 +920,6 @@
       4 /* mr */, 8  /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c16_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x16c16__neon_mlal_padal,
@@ -890,7 +930,6 @@
       4 /* mr */, 16 /* nr */, 4 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x8c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x8c8__neon_mlal_padal,
@@ -901,7 +940,6 @@
       2 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_2x16c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_2x16c8__neon_mlal_padal,
@@ -912,7 +950,6 @@
       2 /* mr */, 16 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x8c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x8c8__neon_mlal_padal,
@@ -923,7 +960,6 @@
       3 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_3x16c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_3x16c8__neon_mlal_padal,
@@ -934,7 +970,6 @@
       3 /* mr */, 16 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x8c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x8c8__neon_mlal_padal,
@@ -945,7 +980,6 @@
       4 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckNEON);
   }
-
   static void qs8_gemm_4x16c8_gemmlowp__neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_gemmlowp_ukernel_4x16c8__neon_mlal_padal,
@@ -999,6 +1033,20 @@
   BENCHMARK_QS8_END2END(qs8_gemm_4x8c4__neon_mull_padal_dup);
   BENCHMARK_QS8_END2END(qs8_gemm_4x16c4__neon_mull_padal_dup);
 
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c4s2__neon_mlal_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c4s2__neon_mlal_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c4s2__neon_mlal_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c4s2__neon_mlal_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c4s2__neon_mlal_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c4s2__neon_mlal_padal);
+
+  BENCHMARK_QS8_END2END(qs8_gemm_2x8c4s2__neon_mull_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_2x16c4s2__neon_mull_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x8c4s2__neon_mull_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_3x16c4s2__neon_mull_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x8c4s2__neon_mull_padal);
+  BENCHMARK_QS8_END2END(qs8_gemm_4x16c4s2__neon_mull_padal);
+
   BENCHMARK_QS8_END2END(qs8_gemm_2x8c2__neon_mlal_padal_dup);
   BENCHMARK_QS8_END2END(qs8_gemm_2x16c2__neon_mlal_padal_dup);
   BENCHMARK_QS8_END2END(qs8_gemm_3x8c2__neon_mlal_padal_dup);
@@ -1148,7 +1196,6 @@
       4 /* mr */, 4 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckXOP);
   }
-
   static void qs8_gemm_2x4c8__xop_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__xop_ld64,
@@ -1169,7 +1216,6 @@
       3 /* mr */, 4 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckXOP);
   }
-
   static void qs8_gemm_2x4c8__xop_ld128(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__xop_ld128,
@@ -1190,7 +1236,6 @@
       3 /* mr */, 4 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckXOP);
   }
-
   static void qs8_gemm_2x4c2__avx_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c2__avx_ld64,
@@ -1293,7 +1338,6 @@
       3 /* mr */, 4 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckAVX);
   }
-
   static void qs8_gemm_2x4c2__sse41_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c2__sse41_ld64,
@@ -1354,7 +1398,6 @@
       4 /* mr */, 4 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckSSE41);
   }
-
   static void qs8_gemm_2x4c8__sse41_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__sse41_ld64,
@@ -1437,7 +1480,6 @@
       3 /* mr */, 4 /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
       benchmark::utils::CheckSSSE3);
   }
-
   static void qs8_gemm_2x4c2__sse2_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c2__sse2_ld64,
@@ -1492,7 +1534,6 @@
       xnn_init_qs8_conv_minmax_fp32_sse2_params,
       4 /* mr */, 4 /* nr */, 1 /* log2_kr */, 0 /* log2_sr */);
   }
-
   static void qs8_gemm_2x4c8__sse2_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__sse2_ld64,
@@ -1647,7 +1688,6 @@
       xnn_init_qs8_conv_minmax_fp32_wasmsimd_params,
       4 /* mr */, 4 /* nr */, 1 /* log2_kr */);
   }
-
   static void qs8_gemm_2x4c8__wasmsimd_dot16x2_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__wasmsimd_dot16x2_ld64,
@@ -1702,7 +1742,6 @@
       xnn_init_qs8_conv_minmax_fp32_wasmsimd_params,
       4 /* mr */, 4 /* nr */, 3 /* log2_kr */);
   }
-
   static void qs8_gemm_2x4c8__wasmsimd_mul16_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_fp32_ukernel_2x4c8__wasmsimd_mul16_ld64,
diff --git a/scripts/generate-qs8-gemm.sh b/scripts/generate-qs8-gemm.sh
index 5286804..7f093f6 100755
--- a/scripts/generate-qs8-gemm.sh
+++ b/scripts/generate-qs8-gemm.sh
@@ -334,6 +334,37 @@
 tools/xngen src/qs8-gemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
 tools/xngen src/qs8-gemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
 
+### C4S2 micro-kernels
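+# C4S2 kernels (kr=4, sr=2): weights are packed so that each 8-byte B vector holds 4
+# channels for two adjacent output columns, and the 8 loaded activation bytes are reused
+# for a second pass by rotating them 4 bytes with VEXT to match the shuffled weight
+# layout, instead of the lane broadcasts used by the c4 "dup" kernels.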
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c &
+
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c &
+
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c &
+
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+tools/xngen src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+
 ### C8 micro-kernels
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c &
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mull-padal.c &
@@ -344,6 +375,15 @@
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mull-padal.c &
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mull-padal.c &
 
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c &
+tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c &
+
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c8-minmax-fp32-neon-mlal-padal.c &
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c8-minmax-fp32-neon-mlal-padal.c &
 
@@ -356,15 +396,6 @@
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-gemm/gen/1x8c8-minmax-fp32-neonv8-mlal-padal.c &
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-gemm/gen/2x8c8-minmax-fp32-neonv8-mlal-padal.c &
 
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x8c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x8c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x16c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x16c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/3x16c8-minmax-gemmlowp-neon-mlal-padal.c &
-tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/4x16c8-minmax-gemmlowp-neon-mlal-padal.c &
-
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/1x8c8-minmax-rndnu-neon-mlal-padal.c &
 tools/xngen src/qs8-gemm/c8-neon-mull-padal.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-gemm/gen/2x8c8-minmax-rndnu-neon-mlal-padal.c &
 
diff --git a/scripts/generate-qs8-igemm.sh b/scripts/generate-qs8-igemm.sh
index 92bfeac..f1dc560 100755
--- a/scripts/generate-qs8-igemm.sh
+++ b/scripts/generate-qs8-igemm.sh
@@ -310,35 +310,66 @@
 tools/xngen src/qs8-igemm/c2-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/2x8c2s4-minmax-fp32-neonv8-mlal-padal.c &
 
 ### C4 micro-kernels
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mull-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mull-padal-dup.c &
 
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
 
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c &
 
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/1x8c4-minmax-fp32-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/2x8c4-minmax-fp32-neon-mlal-padal-dup.c &
 
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32     -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
 
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
-tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU    -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4-minmax-rndnu-neon-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/1x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-dup.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/2x8c4-minmax-fp32-neonv8-mlal-padal-dup.c &
+
+### C4S2 micro-kernels
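+# Same C4S2 (kr=4, sr=2) scheme as the GEMM kernels above, instantiated from the IGEMM
+# shuffle template.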
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=8  -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=16 -D MLA=0 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c &
+
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=8  -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=3 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=4 -D NR=16 -D MLA=1 -D REQUANTIZATION=RNDNU -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c &
+
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c &
+
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=0 -o src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c &
+
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=0 -D ARMV8=1 -o src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=1 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
+tools/xngen src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in -D MR=2 -D NR=8  -D MLA=1 -D REQUANTIZATION=FP32  -D CHANNELWISE=1 -D ARMV8=1 -o src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c &
 
 ### C8 micro-kernels
 tools/xngen src/qs8-igemm/c8-neon-mull-padal.c.in -D MR=1 -D NR=8  -D MLA=0 -D REQUANTIZATION=GEMMLOWP -D CHANNELWISE=0 -D ARMV8=0 -o src/qs8-igemm/gen/1x8c8-minmax-gemmlowp-neon-mull-padal.c &
diff --git a/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..5235bab
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,241 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
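+    // Main loop: 16 bytes of K per iteration. Each 8-byte B vector holds 4 channels for
+    // two adjacent output columns; MULL/MLAL widen the int8 products to 16 bits and
+    // PADAL folds them into the 32-bit column-pair accumulators.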
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
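+      // Rotate the activation vectors by 4 bytes so the same loads feed the second (c1)
+      // group of shuffled weight columns.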
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
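+    // Remainder of 8..15 bytes: a single MULL pass per column pair (no MLAL pairing)
+    // over one activation vector.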
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
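+    // Tail of 1..7 bytes: switch to the lane-duplication scheme of the c4 "dup" kernels
+    // for the last one or, if more than 4 bytes remain, two 4-channel passes.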
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
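+    // FP32 requantization with per-channel (QC8) scales: clamp in the float domain, then
+    // use the magic-bias trick to round to nearest and fold in the output zero point.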
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..65cb918
--- /dev/null
+++ b/src/qc8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,236 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
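+    // NEON v8 variant: round with vcvtnq_s32_f32 (round-to-nearest) instead of the
+    // magic-bias trick, then saturate-narrow, add the output zero point, and clamp after
+    // the final int8 narrowing.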
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..ceb5378
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,344 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
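+  // If only one row of output is requested, alias row 1 onto row 0 so its loads and
+  // stores become harmless duplicates.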
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..4d83c79
--- /dev/null
+++ b/src/qc8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,333 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..2fbef36
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,254 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..059da8c
--- /dev/null
+++ b/src/qc8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,249 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..3386b9f
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,358 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->neon_fp32.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_fp32.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->neon_fp32.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..83729e7
--- /dev/null
+++ b/src/qc8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,347 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
+    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in b/src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
new file mode 100644
index 0000000..29e2a2c
--- /dev/null
+++ b/src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
@@ -0,0 +1,402 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$assert NR % 8 == 0
+$assert 8 <= NR <= 16
+$assert REQUANTIZATION in ["FP32", "GEMMLOWP", "RNDNU"]
+$assert not CHANNELWISE or REQUANTIZATION == "FP32"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+$if REQUANTIZATION == "FP32" and ARMV8:
+  #include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+$DATATYPE = "qc8" if CHANNELWISE else "qs8"
+$PARAMS_UNION = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("neonv8" if ARMV8 and not CHANNELWISE else "neon")
+$if REQUANTIZATION == "FP32" and CHANNELWISE and not ARMV8:
+  $PARAMS_STRUCT = "neon_fp32"
+$ISA = "neonv8" if ARMV8 else "neon"
+void xnn_${DATATYPE}_gemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4s2__${ISA}_${"mlal" if MLA else "mull"}_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  $for M in range(1, MR):
+    const int8_t* a${M} = (const int8_t*) ((uintptr_t) a${M-1} + a_stride);
+    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        a${M} = a${M-1};
+        c${M} = c${M-1};
+      }
+
+  do {
+    $for N in range(0, NR, 2):
+      int32x4_t vacc0x${ABC[N:N+2]} = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    $for M in range(1, MR):
+      $for N in range(0, NR, 2):
+        int32x4_t vacc${M}x${ABC[N:N+2]} = vacc0x${ABC[N:N+2]};
+
+    size_t k = kc;
+
+    $if MLA:
+      while (k >= 16 * sizeof(int8_t)) {
+        $for M in range(MR):
+          int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
+          int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
+
+        $for K in range(2):
+          $for N in range(0, NR, 2):
+            const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        $for K in range(2):
+          $for N in range(0, NR, 2):
+            $for M in range(MR):
+              int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
+            const int8x8_t vb${ABC[N:N+2]}c${K}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+            $for M in range(MR):
+              vprod${M}x${ABC[N:N+2]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+2]}c${K}, vb${ABC[N:N+2]}c${K}x1, va${M}x1);
+            $for M in range(MR):
+              vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
+          $if K + 1 != 2:
+            $for M in range(MR):
+              va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);
+              va${M}x1 = vext_s8(va${M}x1, va${M}x1, 4);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+    ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
+      $for M in range(MR):
+        int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
+
+      $for K in range(2):
+        $for N in range(0, NR, 2):
+          const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      $for K in range(2):
+        $for N in range(0, NR, 2):
+          $for M in range(MR):
+            int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
+          $for M in range(MR):
+            vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
+        $if K + 1 != 2:
+          $for M in range(MR):
+            va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      $for M in range(MR):
+        const int8x8_t va${M} = vld1_s8(a${M}); a${M} = (const int8_t*) ((uintptr_t) a${M} + k);
+
+      $for N in range(0, NR, 2):
+        const int8x8_t vb${ABC[N:N+2]}c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      $for M in range(MR):
+        const int8x8_t va${M}c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), 0));
+        $for N in range(0, NR, 2):
+          const int16x8_t vprod${M}x${ABC[N:N+2]}c0 = vmull_s8(vb${ABC[N:N+2]}c0, va${M}c0);
+          vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        $for N in range(0, NR, 2):
+          const int8x8_t vb${ABC[N:N+2]}c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        $for M in range(MR):
+          const int8x8_t va${M}c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), 1));
+          $for N in range(0, NR, 2):
+            const int16x8_t vprod${M}x${ABC[N:N+2]}c1 = vmull_s8(vb${ABC[N:N+2]}c1, va${M}c1);
+            vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    $for M in range(MR):
+      $for N in range(0, NR, 4):
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vacc${M}x${ABC[N:N+2]}, vacc${M}x${ABC[N+2:N+4]});
+#else
+    $for M in range(MR):
+      $for N in range(0, NR, 4):
+        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N:N+2]}), vget_high_s32(vacc${M}x${ABC[N:N+2]}));
+        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N+2:N+4]}), vget_high_s32(vacc${M}x${ABC[N+2:N+4]}));
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
+#endif
+
+    $if REQUANTIZATION == "GEMMLOWP":
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_shift);
+      const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);
+    $elif REQUANTIZATION == "RNDNU":
+      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
+    $elif REQUANTIZATION == "FP32":
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});
+
+      $if CHANNELWISE:
+        $for N in range(0, NR, 4):
+          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+          $for M in range(MR):
+            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
+      $else:
+        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);
+
+      $if ARMV8:
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
+      $else:
+        const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->${PARAMS_STRUCT}.output_min_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vmaxq_f32(vfpacc${M}x${ABC[N:N+4]}, voutput_min_less_zero_point);
+
+        const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->${PARAMS_STRUCT}.output_max_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vminq_f32(vfpacc${M}x${ABC[N:N+4]}, voutput_max_less_zero_point);
+
+        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));
+
+        const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_zero_point);
+
+    $if REQUANTIZATION != "FP32" or ARMV8:
+      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
+#if XNN_ARCH_ARM64
+    $if REQUANTIZATION == "FP32" and not ARMV8:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vuzp1q_s16(vreinterpretq_s16_s32(vacc${M}x${ABC[N:N+4]}), vreinterpretq_s16_s32(vacc${M}x${ABC[N+4:N+8]}));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vuzp1q_s8(vreinterpretq_s8_s16(vacc${M}x${ABC[N:N+8]}), vreinterpretq_s8_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vuzp1q_s8(vreinterpretq_s8_s16(vacc${M-1}x${ABC[N:N+8]}), vreinterpretq_s8_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vmovn_s16(vacc${M}x${ABC[N:N+8]});
+    $else:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
+#else
+    $if REQUANTIZATION == "FP32" and not ARMV8:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vmovn_s32(vacc${M}x${ABC[N:N+4]}), vmovn_s32(vacc${M}x${ABC[N+4:N+8]}));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vmovn_s16(vacc${M}x${ABC[N:N+8]}), vmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vmovn_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vmovn_s16(vacc${M}x${ABC[N:N+8]});
+    $else:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
+#endif
+    $if REQUANTIZATION != "FP32" or ARMV8:
+      $if NR == 8 and MR == 1:
+        const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
+        const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
+      $else:
+        const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
+        const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
+          $elif M % 2 == 1:
+            vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
+          $elif M + 1 == MR:
+            $if NR == 8 and MR == 1:
+              vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
+            $else:
+              vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
+          $elif M % 2 == 1:
+            vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
+          $elif M + 1 == MR:
+            $if NR == 8 and MR == 1:
+              vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
+            $else:
+              vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));
+
+    if (nc >= ${NR}) {
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
+          $elif M % 2 == 1:
+            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});
+
+      $for M in range(MR):
+        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);
+
+      $for M in range(MR):
+        a${M} = (const int8_t*) ((uintptr_t) a${M} - kc);
+
+      nc -= ${NR};
+    } else {
+      // Final case where not all of the ${NR} columns fit in the destination.
+      $if NR == 16:
+        $for M in range(MR):
+          $if M % 2 == 1:
+            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
+        if (nc & 8) {
+          $for M in range(MR):
+            $if M % 2 == 1:
+              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
+              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
+            $elif M + 1 == MR:
+              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
+          $for M in range(MR):
+            $if M % 2 == 1:
+              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
+            $elif M + 1 == MR:
+              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
+        }
+      if (nc & 4) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
+            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
+          $elif M + 1 == MR:
+            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
+      }
+      if (nc & 2) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
+            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
+          $elif M + 1 == MR:
+            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
+      }
+      if (nc & 1) {
+        $for M in range(MR):
+          $if M % 2 == 1:
+            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
+            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
+          $elif M + 1 == MR:
+            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..4f1311d
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,350 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
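
For reference, a rough scalar model of the rndnu requantization sequence in the kernel above (vshlq_s32 / vqdmulhq_s32 / vrshlq_s32 / vqmovn_s32 + vqaddq_s16 / vqmovn_s16): the helper name and parameter meanings below are illustrative assumptions, not part of the generated file, and pre_shift/post_shift stand for the magnitudes of the negative shift amounts loaded into vright_pre_shift/vright_post_shift (post_shift assumed >= 1).

#include <stdint.h>

/* Saturate a 64-bit value to the int32 range (models NEON saturation). */
static inline int32_t sat_s32(int64_t x) {
  if (x > (int64_t) INT32_MAX) return INT32_MAX;
  if (x < (int64_t) INT32_MIN) return INT32_MIN;
  return (int32_t) x;
}

static inline int8_t rndnu_requantize(int32_t acc, uint32_t pre_shift,
                                      int32_t multiplier, uint32_t post_shift,
                                      int16_t zero_point, int8_t qmin, int8_t qmax) {
  /* vshlq_s32 with a negative shift amount: truncating arithmetic right shift. */
  const int32_t preshifted = acc >> pre_shift;
  /* vqdmulhq_s32: saturating doubling multiply returning the high half, (2*a*b) >> 32. */
  const int32_t scaled = sat_s32(((int64_t) preshifted * (int64_t) multiplier) >> 31);
  /* vrshlq_s32 with a negative shift amount: rounding arithmetic right shift. */
  const int32_t rounded = (int32_t) (((int64_t) scaled + (INT64_C(1) << (post_shift - 1))) >> post_shift);
  /* vqmovn_s32 + vqaddq_s16: narrow with saturation and add the output zero point. */
  int32_t out = rounded + (int32_t) zero_point;
  /* vqmovn_s16 + vmaxq_s8/vminq_s8: clamp to the quantized output range. */
  if (out < (int32_t) qmin) out = qmin;
  if (out > (int32_t) qmax) out = qmax;
  return (int8_t) out;
}

The NEON code performs the intermediate int16 saturation before adding the zero point; the scalar sketch folds that into the final clamp, which gives the same result once the min/max bounds are applied.
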
diff --git a/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..ee8478b
--- /dev/null
+++ b/src/qs8-gemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,260 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
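
The c4s2 kernels above keep two output columns per int32x4 accumulator (e.g. vacc0x01 for columns 0 and 1), so after the main loop each column is still split into two partial sums that need one horizontal add. A scalar sketch of that reduction, under the assumed lane layout (column 0 in lanes 0-1, column 1 in lanes 2-3):

#include <stdint.h>

/* Reduce two paired accumulators into four per-column sums, mirroring
 * vpaddq_s32(vacc0x01, vacc0x23) on AArch64 and the vpadd_s32/vcombine_s32
 * sequence on 32-bit ARM. The lane layout is an assumption for illustration. */
static void reduce_column_pairs(const int32_t acc01[4], const int32_t acc23[4],
                                int32_t out0123[4]) {
  out0123[0] = acc01[0] + acc01[1];  /* column 0 */
  out0123[1] = acc01[2] + acc01[3];  /* column 1 */
  out0123[2] = acc23[0] + acc23[1];  /* column 2 */
  out0123[3] = acc23[2] + acc23[3];  /* column 3 */
}

This is also why the bias load at the top of each kernel uses vshll_n_u32(vld1_u32(w), 0): two int32 bias values are widened so that each column's bias lands in the first lane of its pair and survives the pairwise reduction unchanged.
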
diff --git a/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..0d7ed59
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
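
For reference, a rough scalar model of the fp32 requantization used in the NEON (non-ARMv8) kernel above: rescale in float, clamp against precomputed (output bound minus zero point) values, then use the "magic bias" float-to-int trick instead of an explicit convert-and-round. The field meanings mirror params->fp32_neon as used in the diff, but the helper itself is only an illustration and assumes magic_bias_less_zero_point is the bit pattern of magic_bias minus the output zero point.

#include <stdint.h>
#include <string.h>

static inline int8_t fp32_magic_requantize(int32_t acc, float scale,
                                           float output_min_less_zero_point,
                                           float output_max_less_zero_point,
                                           float magic_bias,
                                           int32_t magic_bias_less_zero_point) {
  /* vcvtq_f32_s32 + vmulq_f32: rescale the int32 accumulator in float. */
  float x = (float) acc * scale;
  /* vmaxq_f32 / vminq_f32: clamp before the zero point is re-applied. */
  if (x < output_min_less_zero_point) x = output_min_less_zero_point;
  if (x > output_max_less_zero_point) x = output_max_less_zero_point;
  /* vaddq_f32 with the magic bias (typically 0x1.8p+23f): after the add,
   * the rounded integer value occupies the low mantissa bits. */
  x += magic_bias;
  int32_t bits;
  memcpy(&bits, &x, sizeof(bits));  /* vreinterpretq_s32_f32 */
  /* vsubq_s32: strip the bias bits and add back the output zero point in one step. */
  return (int8_t) (bits - magic_bias_less_zero_point);
}

Because the clamp happens in float space before the bias add, the final narrowing in the kernel can use plain vmovn instead of a saturating narrow; the ARMv8 variant that follows replaces the magic-bias step with vcvtnq_s32_f32.
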
diff --git a/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..5241a0d
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,235 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..322e851
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,237 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..b801f9f
--- /dev/null
+++ b/src/qs8-gemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,187 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t k = kc;
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..187432c
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,528 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+      vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+      vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+      vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+      vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+      vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+      vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+      vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+      vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
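The requantization tail above is the rndnu path: a plain arithmetic right pre-shift, a saturating doubling multiply-high by a fixed-point multiplier, a rounding right post-shift, a saturating zero-point add, and a final clamp. A minimal scalar sketch of the same computation for one accumulator follows; the names are illustrative, the shifts are written as non-negative right-shift amounts (the kernel stores them negated so that vshlq/vrshlq shift right), and the rare vqdmulhq_s32 saturation case and the intermediate int16 saturations are omitted for clarity.

// Scalar sketch of the rndnu requantization of a single int32 accumulator.
static inline int8_t requantize_rndnu(int32_t acc, uint32_t pre_shift,
                                      int32_t multiplier, uint32_t post_shift,
                                      int16_t zero_point, int8_t qmin, int8_t qmax)
{
  // vshlq_s32 with a negative count: truncating arithmetic right shift.
  const int32_t pre = acc >> pre_shift;
  // vqdmulhq_s32: high 32 bits of the doubled 64-bit product.
  const int32_t mul = (int32_t) (((int64_t) pre * (int64_t) multiplier) >> 31);
  // vrshlq_s32 with a negative count: rounding arithmetic right shift.
  const int32_t rnd = post_shift == 0 ? mul :
      (int32_t) (((int64_t) mul + ((int64_t) 1 << (post_shift - 1))) >> post_shift);
  // Zero-point add and clamp to [qmin, qmax].
  int32_t out = rnd + (int32_t) zero_point;
  if (out < (int32_t) qmin) out = qmin;
  if (out > (int32_t) qmax) out = qmax;
  return (int8_t) out;
}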
diff --git a/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..a6f74a2
--- /dev/null
+++ b/src/qs8-gemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,386 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
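The distinctive part of the kernel above is the C4S2 ("4 channels, shuffle by 2") main loop: instead of duplicating 4-byte activation lanes as the plain C4 kernels do, it runs two weight passes per 8-byte activation load and rotates the activation with vext_s8(va, va, 4) between them. A minimal scalar sketch of one full 8-element k block for one row and one column pair follows; the interleaved packed-weight layout (w_c0, w_c1) is inferred from the shuffle, and all names are illustrative rather than XNNPACK identifiers.

// Scalar sketch of one 8-element k block for activations a[0..7] and columns n, n+1.
// Assumed packing: w_c0 = { col n, k=0..3 | col n+1, k=4..7 },
//                  w_c1 = { col n, k=4..7 | col n+1, k=0..3 }.
// acc_n/acc_n1 start from the biases, which the kernel loads into lanes
// {bias_n, 0, bias_n1, 0} so the later pairwise reductions fold them in.
int32_t acc_n = bias_n, acc_n1 = bias_n1;
for (int i = 0; i < 4; i++) {
  // Pass c0: vmull_s8(vb01c0x0, va0x0) followed by vpadalq_s16.
  acc_n  += (int32_t) w_c0[i]     * (int32_t) a[i];
  acc_n1 += (int32_t) w_c0[4 + i] * (int32_t) a[4 + i];
  // Pass c1 after va0x0 = vext_s8(va0x0, va0x0, 4): the columns swap halves of a.
  acc_n  += (int32_t) w_c1[i]     * (int32_t) a[4 + i];
  acc_n1 += (int32_t) w_c1[4 + i] * (int32_t) a[i];
}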
diff --git a/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..4c8f7c7
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,343 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
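The fp32 kernel above requantizes with the magic-bias trick: the int32 accumulator is scaled in float, clamped against bounds that already have the output zero point subtracted, and converted to an integer by adding a large bias so the rounded value lands in the low mantissa bits of the float's bit pattern. A scalar sketch of one lane follows, with illustrative names (the real constants live in params->fp32_neon); in the common form of this trick the bias is 12582912.0f (0x1.8p+23) and magic_bias_less_zp is its bit pattern minus the output zero point.

// Scalar sketch of the fp32 magic-bias requantization of one accumulator.
static inline int8_t requantize_fp32_magic(int32_t acc, float scale,
                                           float out_min_less_zp, float out_max_less_zp,
                                           float magic_bias, int32_t magic_bias_less_zp)
{
  float x = (float) acc * scale;                    // vcvtq_f32_s32 + vmulq_f32
  x = x < out_min_less_zp ? out_min_less_zp : x;    // vmaxq_f32
  x = x > out_max_less_zp ? out_max_less_zp : x;    // vminq_f32
  union { float f; int32_t i; } bits;
  bits.f = x + magic_bias;                          // vaddq_f32, then vreinterpretq_s32_f32
  // After subtracting the bias pattern the result is already in [qmin, qmax],
  // so the kernel can narrow with the non-saturating vmovn/vuzp1 path.
  return (int8_t) (bits.i - magic_bias_less_zp);    // vsubq_s32
}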
diff --git a/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..a35317e
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,332 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
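
For reference while reading the generated 2x8c4s2 kernels in this change: each kernel accumulates a 2-row by 8-column int32 tile of int8 products before requantizing. The "c4" packing and the "s2" shuffle (the vext_s8(..., 4) rotation that lets the second column-half reuse the already-loaded activation register) only change how that sum is vectorized, not its value. Below is a minimal scalar sketch of the accumulation being computed; the function and parameter names are illustrative and are not part of the generated sources.

#include <stdint.h>
#include <stddef.h>

// Scalar reference for the math of a 2x8 QS8 GEMM tile:
//   acc[m][n] = bias[n] + sum_k a[m][k] * b[k][n]   (all products widened to int32)
// The NEON kernels above compute the same sums, but with the weights
// pre-packed so each 8-byte load covers 4 consecutive k values ("c4")
// for 2 columns, and with the activation register rotated by 4 bytes
// between the two column-halves ("s2").
static void qs8_gemm_2x8_reference(
    size_t kc,
    const int8_t* a0, const int8_t* a1,   // two rows of activations, kc elements each
    const int8_t* b,                      // kc x 8 weights, row-major (unpacked layout)
    const int32_t* bias,                  // 8 per-column biases
    int32_t acc[2][8])
{
  for (size_t n = 0; n < 8; n++) {
    acc[0][n] = bias[n];
    acc[1][n] = bias[n];
  }
  for (size_t k = 0; k < kc; k++) {
    for (size_t n = 0; n < 8; n++) {
      acc[0][n] += (int32_t) a0[k] * (int32_t) b[k * 8 + n];
      acc[1][n] += (int32_t) a1[k] * (int32_t) b[k * 8 + n];
    }
  }
  // The fp32/rndnu tails of the kernels then requantize acc[][] to int8
  // with the variant-specific rounding, output zero point, and min/max clamp.
}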
diff --git a/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..30b6b29
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,334 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..430a737
--- /dev/null
+++ b/src/qs8-gemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,256 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..e6df50b
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,710 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+      vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+      vprod2x89c0 = vmlal_s8(vprod2x89c0, vb89c0x1, va2x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+      vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+      vprod2xABc0 = vmlal_s8(vprod2xABc0, vbABc0x1, va2x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+      vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+      vprod2xCDc0 = vmlal_s8(vprod2xCDc0, vbCDc0x1, va2x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+      vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+      vprod2xEFc0 = vmlal_s8(vprod2xEFc0, vbEFc0x1, va2x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va2x1 = vext_s8(va2x1, va2x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+      vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+      vprod2x89c1 = vmlal_s8(vprod2x89c1, vb89c1x1, va2x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+      vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+      vprod2xABc1 = vmlal_s8(vprod2xABc1, vbABc1x1, va2x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+      vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+      vprod2xCDc1 = vmlal_s8(vprod2xCDc1, vbCDc1x1, va2x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+      vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+      vprod2xEFc1 = vmlal_s8(vprod2xEFc1, vbEFc1x1, va2x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..6332644
--- /dev/null
+++ b/src/qs8-gemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,516 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..439d34d
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,437 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va2x1 = vext_s8(va2x1, va2x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
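+    // Up to one more block of 8 K values: same pattern as the main loop, but with a single
+    // A block per row, so plain VMULL without the MLAL half.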
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
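+    // Remainder of 1-7 K values: broadcast the first group of 4 A values (and, if k > 4,
+    // the second group) and multiply against the corresponding packed B vectors.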
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      }
+    }
+
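+    // Reduce the paired-channel accumulators into 4-wide vectors of per-channel sums:
+    // VPADDQ on AArch64, VPADD + VCOMBINE elsewhere.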
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+#endif
+
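+    // Requantize with the rndnu scheme: pre-shift (VSHL), fixed-point multiply (VQDMULH),
+    // rounding post-shift (VRSHL).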
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
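+    // Add the output zero point in int16, narrow to int8 with saturation, then clamp to the output range.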
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+
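+    // Store 8 output channels per row when they fit; otherwise store the remaining 4/2/1 channels.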
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..afdc4e1
--- /dev/null
+++ b/src/qs8-gemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,331 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+
+  do {
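+    // Load the bias for each pair of output channels. The widening shift by 0 zero-extends each
+    // 32-bit bias to 64 bits, so the odd lanes start at zero and the final pairwise reduction
+    // folds each channel's two partial-sum lanes onto its bias.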
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+
+    size_t k = kc;
+
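+    // Main loop: 8 K values per row per iteration; products are widened with VMULL and
+    // accumulated into the 32-bit lanes with VPADAL.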
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
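+      // Rotate each A vector by 4 elements before the second (c1) set of packed B vectors.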
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
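+    // Remainder of 1-7 K values, handled by broadcasting 4-element groups of A.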
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      }
+    }
+
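+    // Reduce the paired-channel accumulators into per-channel sums.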
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+#endif
+
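+    // rndnu requantization: pre-shift, fixed-point multiply, rounding post-shift.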
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
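+    // Add the output zero point, narrow to int8 with saturation, and clamp.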
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+
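+    // Full 8-channel stores, or 4/2/1-channel tail stores.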
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vout2x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1_lane_s8(c2, vout2x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..519f073
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,888 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
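+    // Load the bias for each pair of output channels into the even lanes (the odd lanes start at zero).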
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+    int32x4_t vacc3x89 = vacc0x89;
+    int32x4_t vacc3xAB = vacc0xAB;
+    int32x4_t vacc3xCD = vacc0xCD;
+    int32x4_t vacc3xEF = vacc0xEF;
+
+    size_t k = kc;
+
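+    // Main loop: 16 K values per row per iteration, combining VMULL and VMLAL on two
+    // 8-element A blocks before the VPADAL accumulation.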
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+      int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+      vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+      vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+      vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+      vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+      const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+      vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+      vprod2x89c0 = vmlal_s8(vprod2x89c0, vb89c0x1, va2x1);
+      vprod3x89c0 = vmlal_s8(vprod3x89c0, vb89c0x1, va3x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+      const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+      vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+      vprod2xABc0 = vmlal_s8(vprod2xABc0, vbABc0x1, va2x1);
+      vprod3xABc0 = vmlal_s8(vprod3xABc0, vbABc0x1, va3x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+      const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+      vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+      vprod2xCDc0 = vmlal_s8(vprod2xCDc0, vbCDc0x1, va2x1);
+      vprod3xCDc0 = vmlal_s8(vprod3xCDc0, vbCDc0x1, va3x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+      const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+      vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+      vprod2xEFc0 = vmlal_s8(vprod2xEFc0, vbEFc0x1, va2x1);
+      vprod3xEFc0 = vmlal_s8(vprod3xEFc0, vbEFc0x1, va3x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
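+      // Rotate the A vectors by 4 elements before the second (c1) set of packed B vectors.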
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va2x1 = vext_s8(va2x1, va2x1, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      va3x1 = vext_s8(va3x1, va3x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+      vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+      vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+      vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+      vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+      const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+      vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+      vprod2x89c1 = vmlal_s8(vprod2x89c1, vb89c1x1, va2x1);
+      vprod3x89c1 = vmlal_s8(vprod3x89c1, vb89c1x1, va3x1);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+      const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+      vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+      vprod2xABc1 = vmlal_s8(vprod2xABc1, vbABc1x1, va2x1);
+      vprod3xABc1 = vmlal_s8(vprod3xABc1, vbABc1x1, va3x1);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+      const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+      vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+      vprod2xCDc1 = vmlal_s8(vprod2xCDc1, vbCDc1x1, va2x1);
+      vprod3xCDc1 = vmlal_s8(vprod3xCDc1, vbCDc1x1, va3x1);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+      const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+      vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+      vprod2xEFc1 = vmlal_s8(vprod2xEFc1, vbEFc1x1, va2x1);
+      vprod3xEFc1 = vmlal_s8(vprod3xEFc1, vbEFc1x1, va3x1);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
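+    // Up to one more block of 8 K values, without the second A block / MLAL half.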
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      const int16x8_t vprod3x89c0 = vmull_s8(vb89c0, va3c0);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+      const int16x8_t vprod3xABc0 = vmull_s8(vbABc0, va3c0);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+      const int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0, va3c0);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+      const int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0, va3c0);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        const int16x8_t vprod3x89c1 = vmull_s8(vb89c1, va3c1);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+        const int16x8_t vprod3xABc1 = vmull_s8(vbABc1, va3c1);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+        const int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1, va3c1);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+        const int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1, va3c1);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+    int32x4_t vacc3x89AB = vpaddq_s32(vacc3x89, vacc3xAB);
+    int32x4_t vacc3xCDEF = vpaddq_s32(vacc3xCD, vacc3xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+    const int32x2_t vsum3x89 = vpadd_s32(vget_low_s32(vacc3x89), vget_high_s32(vacc3x89));
+    const int32x2_t vsum3xAB = vpadd_s32(vget_low_s32(vacc3xAB), vget_high_s32(vacc3xAB));
+    int32x4_t vacc3x89AB = vcombine_s32(vsum3x89, vsum3xAB);
+    const int32x2_t vsum3xCD = vpadd_s32(vget_low_s32(vacc3xCD), vget_high_s32(vacc3xCD));
+    const int32x2_t vsum3xEF = vpadd_s32(vget_low_s32(vacc3xEF), vget_high_s32(vacc3xEF));
+    int32x4_t vacc3xCDEF = vcombine_s32(vsum3xCD, vsum3xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..350465c
--- /dev/null
+++ b/src/qs8-gemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,642 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+    int32x4_t vacc3x89 = vacc0x89;
+    int32x4_t vacc3xAB = vacc0xAB;
+    int32x4_t vacc3xCD = vacc0xCD;
+    int32x4_t vacc3xEF = vacc0xEF;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+      int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+      int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+      int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+      int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+      int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+      int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+      int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+      int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+      int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+      int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+      int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+      int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+      int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+      int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+      int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+      int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+      int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+      int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+      int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+      int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+      int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+      int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+      int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+      int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+      int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+      int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+      int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+      int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+      int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+      int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+      int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+      vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+      const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+      vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+      const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+      vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+      const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+      vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+      vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+      const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+      vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+      const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+      vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+      const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+      vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+      vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+      const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+      vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+      const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+      vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+      const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+      vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+      const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      const int16x8_t vprod3x89c0 = vmull_s8(vb89c0, va3c0);
+      vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+      const int16x8_t vprod3xABc0 = vmull_s8(vbABc0, va3c0);
+      vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+      const int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0, va3c0);
+      vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+      const int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0, va3c0);
+      vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        const int16x8_t vprod3x89c1 = vmull_s8(vb89c1, va3c1);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+        const int16x8_t vprod3xABc1 = vmull_s8(vbABc1, va3c1);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+        const int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1, va3c1);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+        const int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1, va3c1);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+    int32x4_t vacc3x89AB = vpaddq_s32(vacc3x89, vacc3xAB);
+    int32x4_t vacc3xCDEF = vpaddq_s32(vacc3xCD, vacc3xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+    const int32x2_t vsum3x89 = vpadd_s32(vget_low_s32(vacc3x89), vget_high_s32(vacc3x89));
+    const int32x2_t vsum3xAB = vpadd_s32(vget_low_s32(vacc3xAB), vget_high_s32(vacc3xAB));
+    int32x4_t vacc3x89AB = vcombine_s32(vsum3x89, vsum3xAB);
+    const int32x2_t vsum3xCD = vpadd_s32(vget_low_s32(vacc3xCD), vget_high_s32(vacc3xCD));
+    const int32x2_t vsum3xEF = vpadd_s32(vget_low_s32(vacc3xEF), vget_high_s32(vacc3xEF));
+    int32x4_t vacc3xCDEF = vcombine_s32(vsum3xCD, vsum3xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 16;
+    } else {
+      // Final case where not all of the 16 columns fit in the destination.
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..09899c9
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,534 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+
+    size_t k = kc;
+
+    while (k >= 16 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+      int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+      vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+      vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+      vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+      vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+      vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+      vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+      vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+      vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+      vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+      vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+      vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+      vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va0x1 = vext_s8(va0x1, va0x1, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va1x1 = vext_s8(va1x1, va1x1, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va2x1 = vext_s8(va2x1, va2x1, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      va3x1 = vext_s8(va3x1, va3x1, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+      vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+      vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+      vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+      vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+      vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+      vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+      vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+      vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+      vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+      vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+      vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+      vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+      k -= 16 * sizeof(int8_t);
+    }
+
+    if (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+      }
+    }
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..b1b0db3
--- /dev/null
+++ b/src/qs8-gemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,400 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-gemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    const int8_t* restrict a,
+    size_t a_stride,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(kc % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  const int8_t* a0 = a;
+  int8_t* c0 = c;
+  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    a1 = a0;
+    c1 = c0;
+  }
+  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    a2 = a1;
+    c2 = c1;
+  }
+  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    a3 = a2;
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+
+    size_t k = kc;
+
+
+    while (k >= 8 * sizeof(int8_t)) {
+      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+      int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+      const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+      int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+      int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+      int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+      int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+      int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+      int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+      int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+      int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+      int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+      int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+      int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+      int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+      va0x0 = vext_s8(va0x0, va0x0, 4);
+      va1x0 = vext_s8(va1x0, va1x0, 4);
+      va2x0 = vext_s8(va2x0, va2x0, 4);
+      va3x0 = vext_s8(va3x0, va3x0, 4);
+      int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+      int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+      int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+      int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+      int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+      int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+      int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+      int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+      int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+      int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+      int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+      int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+      int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+      int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+      int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+      int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+      k -= 8 * sizeof(int8_t);
+    }
+
+    if XNN_UNLIKELY(k != 0) {
+      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+      const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+      const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+      const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+      const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+      vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+      const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+      vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+      const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+      vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+      const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+      vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+      const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+      const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+      vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+      const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+      vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+      const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+      vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+      const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+      vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+      const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+      const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+      vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+      const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+      vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+      const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+      vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+      const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+      vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+      const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+      const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+      vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+      const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+      vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+      const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+      vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+      const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+      vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+
+      if (k > 4 * sizeof(int8_t)) {
+        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+      }
+    }
+
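+    // Reduce each pair of per-column partial sums to one int32 per output column.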
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+#endif
+
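+    // RNDNU requantization: pre-shift, saturating doubling-high multiply by the
+    // fixed-point multiplier, then rounding post-shift.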
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+
+      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
+      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
+      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
+      a3 = (const int8_t*) ((uintptr_t) a3 - kc);
+
+      nc -= 8;
+    } else {
+      // Final case where not all of the 8 columns fit in the destination.
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in b/src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
new file mode 100644
index 0000000..473be87
--- /dev/null
+++ b/src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
@@ -0,0 +1,411 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$assert NR % 8 == 0
+$assert 8 <= NR <= 16
+$assert REQUANTIZATION in ["FP32", "GEMMLOWP", "RNDNU"]
+$assert not CHANNELWISE or REQUANTIZATION == "FP32"
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+$if REQUANTIZATION == "FP32" and ARMV8:
+  #include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+$DATATYPE = "qc8" if CHANNELWISE else "qs8"
+$PARAMS_UNION = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("neonv8" if ARMV8 and not CHANNELWISE else "neon")
+$if REQUANTIZATION == "FP32" and CHANNELWISE and not ARMV8:
+  $PARAMS_STRUCT = "neon_fp32"
+$ISA = "neonv8" if ARMV8 else "neon"
+void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c4s2__${ISA}_${"mlal" if MLA else "mull"}_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= ${MR});
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (${MR} * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  $for M in range(1, MR):
+    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
+    $if M % 2 == 0:
+      if XNN_UNPREDICTABLE(mr <= ${M}) {
+        c${M} = c${M-1};
+      }
+    $elif M + 1 == MR:
+      if XNN_UNPREDICTABLE(mr != ${M+1}) {
+        c${M} = c${M-1};
+      }
+    $else:
+      if XNN_UNPREDICTABLE(mr < ${M+1}) {
+        c${M} = c${M-1};
+      }
+
+  do {
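+    // Initialize the accumulators from the packed per-channel biases: each pair of
+    // int32 biases is zero-extended so the bias values land in the even lanes; the
+    // odd lanes start at zero and the final pairwise reduction folds each pair into
+    // one value per output column.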
+    $for N in range(0, NR, 2):
+      int32x4_t vacc0x${ABC[N:N+2]} = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    $for M in range(1, MR):
+      $for N in range(0, NR, 2):
+        int32x4_t vacc${M}x${ABC[N:N+2]} = vacc0x${ABC[N:N+2]};
+
+    size_t p = ks;
+    do {
+      $for M in range(MR):
+        const int8_t* restrict a${M} = a[${M}];
+        if XNN_UNPREDICTABLE(a${M} != zero) {
+          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
+        }
+      a += ${MR};
+
+      size_t k = kc;
+
+      $if MLA:
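+        // Main loop: 16 k-values per row per iteration, combining MULL and MLAL.
+        // After the c0 block, the activation vectors are rotated by 4 bytes
+        // (vext_s8) so the other 4-element k-group lines up with the c1 weights.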
+        while (k >= 16 * sizeof(int8_t)) {
+          $for M in range(MR):
+            int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
+            int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
+
+          $for K in range(2):
+            $for N in range(0, NR, 2):
+              const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          $for K in range(2):
+            $for N in range(0, NR, 2):
+              $for M in range(MR):
+                int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
+              const int8x8_t vb${ABC[N:N+2]}c${K}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+              $for M in range(MR):
+                vprod${M}x${ABC[N:N+2]}c${K} = vmlal_s8(vprod${M}x${ABC[N:N+2]}c${K}, vb${ABC[N:N+2]}c${K}x1, va${M}x1);
+              $for M in range(MR):
+                vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
+            $if K + 1 != 2:
+              $for M in range(MR):
+                va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);
+                va${M}x1 = vext_s8(va${M}x1, va${M}x1, 4);
+
+          k -= 16 * sizeof(int8_t);
+        }
+
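+      // Process one group of 8 k-values per row: the main loop of the MULL-only
+      // variant, or at most one trailing group in the MLAL variant.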
+      ${"if" if MLA else "while"} (k >= 8 * sizeof(int8_t)) {
+        $for M in range(MR):
+          int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
+
+        $for K in range(2):
+          $for N in range(0, NR, 2):
+            const int8x8_t vb${ABC[N:N+2]}c${K}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        $for K in range(2):
+          $for N in range(0, NR, 2):
+            $for M in range(MR):
+              int16x8_t vprod${M}x${ABC[N:N+2]}c${K} = vmull_s8(vb${ABC[N:N+2]}c${K}x0, va${M}x0);
+            $for M in range(MR):
+              vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c${K});
+          $if K + 1 != 2:
+            $for M in range(MR):
+              va${M}x0 = vext_s8(va${M}x0, va${M}x0, 4);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
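+      // Remainder of 1..7 k-values, handled in up to two groups of 4: broadcast the
+      // low 4 activation bytes for the c0 block and, if more than 4 k-values remain,
+      // the high 4 bytes for the c1 block.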
+      if XNN_UNLIKELY(k != 0) {
+        $for M in range(MR):
+          const int8x8_t va${M} = vld1_s8(a${M}); a${M} = (const int8_t*) ((uintptr_t) a${M} + k);
+
+        $for N in range(0, NR, 2):
+          const int8x8_t vb${ABC[N:N+2]}c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        $for M in range(MR):
+          const int8x8_t va${M}c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), 0));
+          $for N in range(0, NR, 2):
+            const int16x8_t vprod${M}x${ABC[N:N+2]}c0 = vmull_s8(vb${ABC[N:N+2]}c0, va${M}c0);
+            vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          $for N in range(0, NR, 2):
+            const int8x8_t vb${ABC[N:N+2]}c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          $for M in range(MR):
+            const int8x8_t va${M}c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va${M}), 1));
+            $for N in range(0, NR, 2):
+              const int16x8_t vprod${M}x${ABC[N:N+2]}c1 = vmull_s8(vb${ABC[N:N+2]}c1, va${M}c1);
+              vacc${M}x${ABC[N:N+2]} = vpadalq_s16(vacc${M}x${ABC[N:N+2]}, vprod${M}x${ABC[N:N+2]}c1);
+        }
+      }
+
+      p -= ${MR} * sizeof(void*);
+    } while (p != 0);
+
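+    // Each accumulator holds two interleaved pairs of partial sums (one pair per
+    // output column); reduce them to one int32 per column with pairwise adds.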
+#if XNN_ARCH_ARM64
+    $for M in range(MR):
+      $for N in range(0, NR, 4):
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vacc${M}x${ABC[N:N+2]}, vacc${M}x${ABC[N+2:N+4]});
+#else
+    $for M in range(MR):
+      $for N in range(0, NR, 4):
+        const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N:N+2]}), vget_high_s32(vacc${M}x${ABC[N:N+2]}));
+        const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vget_low_s32(vacc${M}x${ABC[N+2:N+4]}), vget_high_s32(vacc${M}x${ABC[N+2:N+4]}));
+        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
+#endif
+
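+    // Requantize the int32 accumulators to the int8 output range.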
+    $if REQUANTIZATION == "GEMMLOWP":
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_shift);
+      const int32x4_t vzero_shift_mask = vreinterpretq_s32_u32(vceqq_s32(vright_shift, vmovq_n_s32(0)));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqrdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vsraq_n_s32(vacc${M}x${ABC[N:N+4]}, vbicq_s32(vacc${M}x${ABC[N:N+4]}, vzero_shift_mask), 31);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_shift);
+    $elif REQUANTIZATION == "RNDNU":
+      const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
+      const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
+      const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
+    $elif REQUANTIZATION == "FP32":
+      $for M in range(MR):
+        $for N in range(0, NR, 4):
+          float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});
+
+      $if CHANNELWISE:
+        $for N in range(0, NR, 4):
+          const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
+          $for M in range(MR):
+            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
+      $else:
+        const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);
+
+      $if ARMV8:
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
+      $else:
+        const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->${PARAMS_STRUCT}.output_min_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vmaxq_f32(vfpacc${M}x${ABC[N:N+4]}, voutput_min_less_zero_point);
+
+        const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->${PARAMS_STRUCT}.output_max_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vfpacc${M}x${ABC[N:N+4]} = vminq_f32(vfpacc${M}x${ABC[N:N+4]}, voutput_max_less_zero_point);
+
+        const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));
+
+        const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_zero_point);
+        $for M in range(MR):
+          $for N in range(0, NR, 4):
+            vacc${M}x${ABC[N:N+4]} = vsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_zero_point);
+
+    $if REQUANTIZATION != "FP32" or ARMV8:
+      const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
+#if XNN_ARCH_ARM64
+    $if REQUANTIZATION == "FP32" and not ARMV8:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vuzp1q_s16(vreinterpretq_s16_s32(vacc${M}x${ABC[N:N+4]}), vreinterpretq_s16_s32(vacc${M}x${ABC[N+4:N+8]}));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vuzp1q_s8(vreinterpretq_s8_s16(vacc${M}x${ABC[N:N+8]}), vreinterpretq_s8_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vuzp1q_s8(vreinterpretq_s8_s16(vacc${M-1}x${ABC[N:N+8]}), vreinterpretq_s8_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vmovn_s16(vacc${M}x${ABC[N:N+8]});
+    $else:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]}), voutput_zero_point);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
+#else
+    $if REQUANTIZATION == "FP32" and not ARMV8:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vmovn_s32(vacc${M}x${ABC[N:N+4]}), vmovn_s32(vacc${M}x${ABC[N+4:N+8]}));
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vmovn_s16(vacc${M}x${ABC[N:N+8]}), vmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vmovn_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vmovn_s16(vacc${M}x${ABC[N:N+8]});
+    $else:
+      $for M in range(MR):
+        $for N in range(0, NR, 8):
+          const int16x8_t vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]})), voutput_zero_point);
+
+      $for M in range(MR):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
+          $elif M % 2 == 1:
+            int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
+#endif
+    $if REQUANTIZATION != "FP32" or ARMV8:
+      $if NR == 8 and MR == 1:
+        const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
+        const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
+      $else:
+        const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
+        const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
+
+      $for M in reversed(range(MR)):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
+          $elif M % 2 == 1:
+            vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
+          $elif M + 1 == MR:
+            $if NR == 8 and MR == 1:
+              vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
+            $else:
+              vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));
+
+      $for M in reversed(range(MR)):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
+          $elif M % 2 == 1:
+            vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
+          $elif M + 1 == MR:
+            $if NR == 8 and MR == 1:
+              vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
+            $else:
+              vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));
+
+    if (nc >= ${NR}) {
+      $for M in reversed(range(MR)):
+        $for N in range(0, NR, 16):
+          $if N + 8 < NR:
+            vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
+          $elif M % 2 == 1:
+            vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+            vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
+          $elif M + 1 == MR:
+            vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});
+
+      $for M in reversed(range(MR)):
+        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= ${NR};
+    } else {
+      $if NR == 16:
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
+          $elif M + 1 == MR:
+            int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
+        if (nc & 8) {
+          $for M in reversed(range(MR)):
+            $if M % 2 == 1:
+              vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
+              vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
+            $elif M + 1 == MR:
+              vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
+          $for M in reversed(range(MR)):
+            $if M % 2 == 1:
+              vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
+            $elif M + 1 == MR:
+              vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
+        }
+      if (nc & 4) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
+            vst1q_lane_u32(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
+          $elif M + 1 == MR:
+            vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
+      }
+      if (nc & 2) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
+            vst1q_lane_u16(__builtin_assume_aligned(c${M-1}, 1), vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
+          $elif M + 1 == MR:
+            vst1_lane_u16(__builtin_assume_aligned(c${M}, 1), vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
+          $elif M + 1 == MR:
+            vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
+      }
+      if (nc & 1) {
+        $for M in reversed(range(MR)):
+          $if M % 2 == 1:
+            vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
+            vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
+          $elif M + 1 == MR:
+            vst1_lane_s8(c${M}, vout${M}x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..9385120
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,363 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
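+    // Initialize the accumulators from the packed per-channel biases: the biases
+    // land in the even lanes and the final pairwise reduction folds each pair into
+    // one value per output column.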
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
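+      // Main loop: 16 k-values per iteration; after the c0 products, the activation
+      // vectors are rotated by 4 bytes (vext_s8) to line up with the c1 weights.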
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
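+      // Remainder of 1..7 k-values, handled in up to two groups of 4.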
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
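+    // Reduce each pair of per-column partial sums to one int32 per output column.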
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..5820210
--- /dev/null
+++ b/src/qs8-igemm/gen/1x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,273 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
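+    // Initialize the accumulators from the packed per-channel biases: the biases
+    // land in the even lanes and the final pairwise reduction folds each pair into
+    // one value per output column.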
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
+      if (nc & 8) {
+        vst1_s8(c0, vout0x01234567); c0 += 8;
+        vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
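
Note on the c4s2 structure shared by the kernels in this change: each main-loop step above multiplies an 8-byte activation vector against a packed weight vector with vmull_s8, folds adjacent int16 product pairs into the int32 accumulators with vpadalq_s16, and then rotates the activations by four bytes with vext_s8(va, va, 4) so the second c-group (the "s2" shuffle) reuses the same activation load. The scalar sketch below models one such step and the final vpaddq_s32/vpadd_s32 reduction; it is illustrative only (the helper names are not part of XNNPACK) and assumes the weight packing implied by the intrinsics.

#include <stdint.h>

// Scalar model of one vmull_s8 + vpadalq_s16 step: accumulator lane j gains
// the 2-element dot product of activation bytes (2j, 2j+1) with the matching
// packed weight bytes.
static void c4_padal_block(const int8_t va[8], const int8_t vb[8], int32_t vacc[4]) {
  for (int j = 0; j < 4; j++) {
    vacc[j] += (int32_t) va[2 * j + 0] * vb[2 * j + 0]
             + (int32_t) va[2 * j + 1] * vb[2 * j + 1];
  }
}

// The trailing vpaddq_s32 (or vpadd_s32 pair on AArch32) then adds lanes
// (0, 1) and (2, 3), so each accumulator register yields complete sums for
// two output channels.
static void c4_pairwise_reduce(const int32_t vacc01[4], int32_t out[2]) {
  out[0] = vacc01[0] + vacc01[1];  // output channel n
  out[1] = vacc01[2] + vacc01[3];  // output channel n + 1
}
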
diff --git a/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..5dfe4a8
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,253 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+
+    int8x8_t vout0x01234567 = vmovn_s16(vacc0x01234567);
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
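
Note on the fp32 requantization used by the kernel above: after scaling and clamping, the float accumulators are converted back to integers by adding vmagic_bias, reinterpreting the bits as int32, and subtracting vmagic_bias_less_zero_point, which also folds in the output zero point. Below is a minimal scalar sketch of the trick, assuming the conventional 0x1.8p+23 bias value and that the parameter equals the bias bit pattern minus the zero point; both are assumptions for illustration, not taken from this diff.

#include <stdint.h>
#include <string.h>

// Scalar sketch of the magic-bias float->int conversion. Valid when |x| is
// well below 2^22, so the float addition rounds x to the nearest integer
// inside the mantissa.
static int32_t magic_bias_convert(float x, int32_t output_zero_point) {
  const float vmagic_bias = 12582912.0f;  // 0x1.8p+23 (assumed value)
  int32_t magic_bias_bits;
  memcpy(&magic_bias_bits, &vmagic_bias, sizeof(magic_bias_bits));
  // Assumed to correspond to vmagic_bias_less_zero_point in the kernel.
  const int32_t magic_bias_less_zero_point = magic_bias_bits - output_zero_point;

  const float y = x + vmagic_bias;       // rounded integer now sits in the mantissa
  int32_t y_bits;
  memcpy(&y_bits, &y, sizeof(y_bits));   // mirrors vreinterpretq_s32_f32
  return y_bits - magic_bias_less_zero_point;  // == round(x) + output_zero_point
}
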
diff --git a/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..12859e1
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,248 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..3850cf9
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,250 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
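
Note on the rndnu requantization used above: the accumulators go through an arithmetic right pre-shift (vshlq_s32 with a negative count), a saturating doubling multiply-high by the fixed-point multiplier (vqdmulhq_s32), and a rounding right post-shift (vrshlq_s32), before narrowing, zero-point addition, and clamping. The following is a scalar model only, under the assumptions that both shift counts are positive, saturation is not reached, and >> on negative int is arithmetic (as on the targeted compilers).

#include <stdint.h>

// Scalar model of the rndnu requantization. right_pre_shift/right_post_shift
// are the positive right-shift counts; the kernel stores their negations so
// that vshlq_s32/vrshlq_s32 shift right.
static int32_t rndnu_requantize(int32_t acc, int32_t multiplier,
                                uint32_t right_pre_shift, uint32_t right_post_shift,
                                int32_t output_zero_point) {
  // vshlq_s32 with a negative count: plain (truncating) arithmetic right shift.
  const int32_t pre = acc >> right_pre_shift;
  // vqdmulhq_s32: high 32 bits of the doubled 64-bit product (saturation ignored).
  const int32_t high = (int32_t) (((int64_t) pre * (int64_t) multiplier * 2) >> 32);
  // vrshlq_s32 with a negative count: rounding arithmetic right shift.
  const int32_t out = (int32_t) (((int64_t) high + ((int64_t) 1 << (right_post_shift - 1))) >> right_post_shift);
  // The kernel adds the zero point in int16 after narrowing, then narrows to
  // int8 and clamps to [output_min, output_max]; equivalent here modulo saturation.
  return out + output_zero_point;
}
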
diff --git a/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..618fca1
--- /dev/null
+++ b/src/qs8-igemm/gen/1x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 1);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (1 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      a += 1;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        }
+      }
+
+      p -= 1 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+
+    int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
+#endif
+    const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
+    const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
+
+    vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c0 + 0, vout0x01234567);
+
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
+        vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c0, vout0x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..9ec30e3
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,542 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
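+      // Main loop: consume 16 bytes of K per row; MULL+MLAL products over the c0 shuffle position, then over c1 after rotating A by 4 bytes, are pairwise-accumulated (VPADAL) into 2-column int32 accumulators.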
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+        vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+        vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+        vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+        vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+        vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+        vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+        vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+        vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
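+      // 8-byte remainder of K: same shuffle pattern, but a single MULL pass per position (no MLAL pairing).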
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
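+      // Final 1..7 bytes of K: broadcast the first 4-byte group of A and accumulate; process the second group only when k > 4.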
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
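+    // Horizontal reduction: pairwise-add the 2-column accumulators into 4-column vectors (VPADDQ on AArch64, VPADD + VCOMBINE elsewhere).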
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+#endif
+
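+    // Requantization (rndnu): arithmetic shift by the pre-shift amount, saturating doubling-high multiply by the fixed-point multiplier, then rounding shift by the post-shift amount.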
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
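+    // Add the output zero point, narrow to int8 with saturation, and clamp to [output_min, output_max].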
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..be82cb6
--- /dev/null
+++ b/src/qs8-igemm/gen/2x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,400 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
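+      // Main loop: 8 bytes of K per row per iteration using MULL only; this mull variant omits the MLAL-paired 16-byte loop of the mlal kernel above.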
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c b/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
new file mode 100644
index 0000000..86bee27
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neon-mlal-padal.c
@@ -0,0 +1,357 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
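+      // Main loop: identical MULL+MLAL accumulation over 16 bytes of K as in the rndnu kernels above; this fp32 variant differs only in how the int32 accumulators are later requantized.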
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    const float32x4_t voutput_min_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_min_less_zero_point);
+    vfpacc0x0123 = vmaxq_f32(vfpacc0x0123, voutput_min_less_zero_point);
+    vfpacc0x4567 = vmaxq_f32(vfpacc0x4567, voutput_min_less_zero_point);
+    vfpacc1x0123 = vmaxq_f32(vfpacc1x0123, voutput_min_less_zero_point);
+    vfpacc1x4567 = vmaxq_f32(vfpacc1x4567, voutput_min_less_zero_point);
+
+    const float32x4_t voutput_max_less_zero_point = vld1q_dup_f32(&params->fp32_neon.output_max_less_zero_point);
+    vfpacc0x0123 = vminq_f32(vfpacc0x0123, voutput_max_less_zero_point);
+    vfpacc0x4567 = vminq_f32(vfpacc0x4567, voutput_max_less_zero_point);
+    vfpacc1x0123 = vminq_f32(vfpacc1x0123, voutput_max_less_zero_point);
+    vfpacc1x4567 = vminq_f32(vfpacc1x4567, voutput_max_less_zero_point);
+
+    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
+    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
+    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
+    vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
+    vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
+
+    const int32x4_t vmagic_bias_less_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_zero_point);
+    vacc0x0123 = vsubq_s32(vacc0x0123, vmagic_bias_less_zero_point);
+    vacc0x4567 = vsubq_s32(vacc0x4567, vmagic_bias_less_zero_point);
+    vacc1x0123 = vsubq_s32(vacc1x0123, vmagic_bias_less_zero_point);
+    vacc1x4567 = vsubq_s32(vacc1x4567, vmagic_bias_less_zero_point);
+
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc0x0123), vreinterpretq_s16_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vuzp1q_s16(vreinterpretq_s16_s32(vacc1x0123), vreinterpretq_s16_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vuzp1q_s8(vreinterpretq_s8_s16(vacc0x01234567), vreinterpretq_s8_s16(vacc1x01234567));
+#else
+    const int16x8_t vacc0x01234567 = vcombine_s16(vmovn_s32(vacc0x0123), vmovn_s32(vacc0x4567));
+    const int16x8_t vacc1x01234567 = vcombine_s16(vmovn_s32(vacc1x0123), vmovn_s32(vacc1x4567));
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vmovn_s16(vacc0x01234567), vmovn_s16(vacc1x01234567));
+#endif
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
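In the 2x8c4s2 kernels above, each int8x8_t of activations carries 8 consecutive K values, and the weights are packed so that one 8-byte register holds a 4-deep slice for two output columns at once. The main loop multiplies against the "c0" weight group, rotates the activation register by 4 bytes with vext_s8, multiplies against the "c1" group, pairwise-accumulates the int16 products into int32 lanes with vpadalq_s16, and finally reduces the column pairs with pairwise adds before requantization. Per output column, an 8-deep slice still computes a plain signed 8-bit dot product; a minimal scalar reference (illustrative only, hypothetical name):

#include <stdint.h>

/* Contribution of one 8-deep K slice to a single output column:
   acc[n] += sum_k a[k] * w[n][k]. */
static int32_t dot8(const int8_t a[8], const int8_t w[8]) {
  int32_t acc = 0;
  for (int k = 0; k < 8; k++) {
    acc += (int32_t) a[k] * (int32_t) w[k];
  }
  return acc;
}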
diff --git a/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c b/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
new file mode 100644
index 0000000..d1e2be9
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c4s2-minmax-fp32-neonv8-mlal-padal.c
@@ -0,0 +1,346 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/intrinsics-polyfill.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
+    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
+    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
+    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
+
+    const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
+    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
+    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
+    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
+    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
+
+    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
+    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
+    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
+    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
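The neonv8 variant above requantizes with vcvtnq_s32_f32 (round-to-nearest-even float-to-int), which is only available on ARMv8. The plain NEON fp32 variant earlier in this diff instead clamps in float and rounds with the magic-bias trick: adding a bias of 1.5 * 2^23 forces the rounded integer into the low mantissa bits, and subtracting magic_bias_less_zero_point then folds in the output zero point without an explicit conversion instruction. A minimal scalar sketch of that rounding step (illustrative only; the constants mirror the fp32_neon params, but the helper name is hypothetical):

#include <stdint.h>
#include <string.h>

static int32_t magic_bias_round(float x, int32_t zero_point) {
  /* x must already be clamped to the representable output range; the kernel
     applies output_min/output_max_less_zero_point before this step. */
  const float vmagic_bias = 12582912.0f;                     /* 0x1.8p+23f */
  const int32_t vmagic_bias_less_zero_point =
      INT32_C(0x4B400000) - zero_point;                      /* bit pattern of the bias, minus the zero point */
  const float y = x + vmagic_bias;                           /* rounds x to nearest-even into the low mantissa bits */
  int32_t bits;
  memcpy(&bits, &y, sizeof(bits));                           /* reinterpret the float bits as int32 */
  return bits - vmagic_bias_less_zero_point;                 /* == round(x) + zero_point for in-range x */
}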
diff --git a/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..9068981
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,348 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
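The rndnu path above requantizes entirely in integers: a non-rounding right shift (vshlq_s32 with a negative right_pre_shift), a saturating doubling multiply-high against a fixed-point multiplier (vqdmulhq_s32), a rounding right shift (vrshlq_s32 with a negative right_post_shift), then zero-point addition, saturating narrowing, and the final int8 clamp. A scalar model of one lane (illustrative only, hypothetical helper name; assumes right_pre_shift <= 0 and right_post_shift < 0, assumes arithmetic >> on negative values, and omits the intermediate int16 saturation, which the final clamp subsumes for in-range parameters):

#include <stdint.h>

static int8_t rndnu_requantize(int32_t acc, int32_t pre_shift, int32_t multiplier,
                               int32_t post_shift, int16_t zero_point,
                               int8_t qmin, int8_t qmax) {
  /* 1. vshlq_s32 with a negative shift: truncating arithmetic right shift. */
  int64_t v = (int64_t) acc >> (uint32_t) -pre_shift;
  /* 2. vqdmulhq_s32: high 32 bits of the doubled product, i.e. (v * multiplier) >> 31
        (saturation of the INT32_MIN * INT32_MIN corner case omitted). */
  v = (v * (int64_t) multiplier) >> 31;
  /* 3. vrshlq_s32 with a negative shift: rounding arithmetic right shift. */
  const uint32_t s = (uint32_t) -post_shift;
  v = (v + ((int64_t) 1 << (s - 1))) >> s;
  /* 4. add the output zero point and clamp to the quantized output range. */
  v += zero_point;
  if (v < qmin) v = qmin;
  if (v > qmax) v = qmax;
  return (int8_t) v;
}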
diff --git a/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..855c3c2
--- /dev/null
+++ b/src/qs8-igemm/gen/2x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,270 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 2);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (2 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 2) {
+    c1 = c0;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      a += 2;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        }
+      }
+
+      p -= 2 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
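
Note on the main loop of the kernel above: each row vector va holds eight consecutive int8 inputs and is used twice, once against the c0 block of packed weights and once, after vext_s8(va, va, 4) rotates it by four bytes, against the c1 block, so both halves of k reach both columns of each accumulator pair. Below is a minimal scalar sketch of one such k-step for one row and output columns {0, 1}; the packing layout spelled out in the comments is an assumption made for illustration, and the helper name is hypothetical.

#include <stdint.h>

// Scalar model of one 8-element k-step of the c4s2 accumulation.
// Assumed packing (illustration only):
//   b_c0[0..3] = w[col0][k0..k3]   b_c0[4..7] = w[col1][k4..k7]
//   b_c1[0..3] = w[col0][k4..k7]   b_c1[4..7] = w[col1][k0..k3]
// acc01 holds four partial sums laid out {col0, col0, col1, col1}, exactly
// like vacc0x01 before the final pairwise reduction.
static void c4s2_step_scalar(int32_t acc01[4], const int8_t a[8],
                             const int8_t b_c0[8], const int8_t b_c1[8]) {
  // c0 pass: vmull_s8 + vpadalq_s16 on the unrotated input vector.
  for (int i = 0; i < 8; i += 2) {
    acc01[i / 2] += (int32_t) a[i] * b_c0[i] + (int32_t) a[i + 1] * b_c0[i + 1];
  }
  // vext_s8(va, va, 4): rotate the input by four bytes so the other half of
  // the k values lines up with the c1 weight block.
  int8_t ar[8];
  for (int i = 0; i < 8; i++) {
    ar[i] = a[(i + 4) % 8];
  }
  // c1 pass: the rotated input against the c1 block covers the remaining
  // (column, k) combinations.
  for (int i = 0; i < 8; i += 2) {
    acc01[i / 2] += (int32_t) ar[i] * b_c1[i] + (int32_t) ar[i + 1] * b_c1[i + 1];
  }
}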
diff --git a/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..f86d55e
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,725 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+        vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+        vprod2x89c0 = vmlal_s8(vprod2x89c0, vb89c0x1, va2x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+        vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+        vprod2xABc0 = vmlal_s8(vprod2xABc0, vbABc0x1, va2x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+        vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+        vprod2xCDc0 = vmlal_s8(vprod2xCDc0, vbCDc0x1, va2x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+        vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+        vprod2xEFc0 = vmlal_s8(vprod2xEFc0, vbEFc0x1, va2x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va2x1 = vext_s8(va2x1, va2x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+        vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+        vprod2x89c1 = vmlal_s8(vprod2x89c1, vb89c1x1, va2x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+        vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+        vprod2xABc1 = vmlal_s8(vprod2xABc1, vbABc1x1, va2x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+        vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+        vprod2xCDc1 = vmlal_s8(vprod2xCDc1, vbCDc1x1, va2x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+        vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+        vprod2xEFc1 = vmlal_s8(vprod2xEFc1, vbEFc1x1, va2x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+          vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+          const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+          vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+          const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+          vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+          const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+          vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        }
+      }
+
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
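
Note on the output stage shared by the rndnu kernels in this diff: after the pairwise reduction, each int32 accumulator is requantized with a pre-shift (vshlq_s32), a saturating doubling high multiply (vqdmulhq_s32), a rounding post-shift (vrshlq_s32), a zero-point add, and a clamp to [output_min, output_max] during the int16/int8 narrowing. A minimal scalar sketch of that arithmetic for a single lane follows; the helper name is hypothetical, and the saturation at the narrowing steps is folded into the final clamp, which produces the same result since output_min/output_max are int8 values.

#include <stdint.h>

// Scalar model of the rndnu requantization of one int32 accumulator.
// right_pre_shift and right_post_shift are stored as non-positive values so
// that vshlq_s32/vrshlq_s32 shift right; that convention is kept here.
static int8_t rndnu_requantize_scalar(int32_t acc,
                                      int32_t right_pre_shift,   // <= 0
                                      int32_t multiplier,
                                      int32_t right_post_shift,  // <= 0
                                      int16_t output_zero_point,
                                      int8_t output_min,
                                      int8_t output_max) {
  // vshlq_s32 with a negative shift: truncating arithmetic right shift.
  int64_t v = (int64_t) acc >> (-right_pre_shift);
  // vqdmulhq_s32: high 32 bits of the doubled product, i.e. (v * m) >> 31
  // (ignoring the single saturating corner case of INT32_MIN * INT32_MIN).
  v = (v * (int64_t) multiplier) >> 31;
  // vrshlq_s32 with a negative shift: rounding arithmetic right shift.
  const int s = -right_post_shift;
  if (s > 0) {
    v = (v + ((int64_t) 1 << (s - 1))) >> s;
  }
  // Zero-point add, then clamp; the kernels saturate while narrowing to
  // int16 and int8, which the clamp below reproduces.
  v += output_zero_point;
  if (v < output_min) { v = output_min; }
  if (v > output_max) { v = output_max; }
  return (int8_t) v;
}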
diff --git a/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..efa2cb9
--- /dev/null
+++ b/src/qs8-igemm/gen/3x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,531 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+          vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+          const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+          vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+          const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+          vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+          const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+          vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        }
+      }
+
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c2, vout2x01234567); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..eaf847c
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,452 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va2x1 = vext_s8(va2x1, va2x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        }
+      }
+
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..bf85dd8
--- /dev/null
+++ b/src/qs8-igemm/gen/3x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,346 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 3);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (3 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      a += 3;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        }
+      }
+
+      p -= 3 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c2 + 0, vout2x01234567);
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1_lane_s8(c2, vout2x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..ae74db0
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,904 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+    int32x4_t vacc3x89 = vacc0x89;
+    int32x4_t vacc3xAB = vacc0xAB;
+    int32x4_t vacc3xCD = vacc0xCD;
+    int32x4_t vacc3xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+        int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+        vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+        vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+        vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+        vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+        const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
+        vprod1x89c0 = vmlal_s8(vprod1x89c0, vb89c0x1, va1x1);
+        vprod2x89c0 = vmlal_s8(vprod2x89c0, vb89c0x1, va2x1);
+        vprod3x89c0 = vmlal_s8(vprod3x89c0, vb89c0x1, va3x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+        const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
+        vprod1xABc0 = vmlal_s8(vprod1xABc0, vbABc0x1, va1x1);
+        vprod2xABc0 = vmlal_s8(vprod2xABc0, vbABc0x1, va2x1);
+        vprod3xABc0 = vmlal_s8(vprod3xABc0, vbABc0x1, va3x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+        const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
+        vprod1xCDc0 = vmlal_s8(vprod1xCDc0, vbCDc0x1, va1x1);
+        vprod2xCDc0 = vmlal_s8(vprod2xCDc0, vbCDc0x1, va2x1);
+        vprod3xCDc0 = vmlal_s8(vprod3xCDc0, vbCDc0x1, va3x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+        const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
+        vprod1xEFc0 = vmlal_s8(vprod1xEFc0, vbEFc0x1, va1x1);
+        vprod2xEFc0 = vmlal_s8(vprod2xEFc0, vbEFc0x1, va2x1);
+        vprod3xEFc0 = vmlal_s8(vprod3xEFc0, vbEFc0x1, va3x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va2x1 = vext_s8(va2x1, va2x1, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        va3x1 = vext_s8(va3x1, va3x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+        vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+        vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+        vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+        vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+        const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
+        vprod1x89c1 = vmlal_s8(vprod1x89c1, vb89c1x1, va1x1);
+        vprod2x89c1 = vmlal_s8(vprod2x89c1, vb89c1x1, va2x1);
+        vprod3x89c1 = vmlal_s8(vprod3x89c1, vb89c1x1, va3x1);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+        const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
+        vprod1xABc1 = vmlal_s8(vprod1xABc1, vbABc1x1, va1x1);
+        vprod2xABc1 = vmlal_s8(vprod2xABc1, vbABc1x1, va2x1);
+        vprod3xABc1 = vmlal_s8(vprod3xABc1, vbABc1x1, va3x1);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+        const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
+        vprod1xCDc1 = vmlal_s8(vprod1xCDc1, vbCDc1x1, va1x1);
+        vprod2xCDc1 = vmlal_s8(vprod2xCDc1, vbCDc1x1, va2x1);
+        vprod3xCDc1 = vmlal_s8(vprod3xCDc1, vbCDc1x1, va3x1);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+        const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
+        vprod1xEFc1 = vmlal_s8(vprod1xEFc1, vbEFc1x1, va1x1);
+        vprod2xEFc1 = vmlal_s8(vprod2xEFc1, vbEFc1x1, va2x1);
+        vprod3xEFc1 = vmlal_s8(vprod3xEFc1, vbEFc1x1, va3x1);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        const int16x8_t vprod3x89c0 = vmull_s8(vb89c0, va3c0);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+        const int16x8_t vprod3xABc0 = vmull_s8(vbABc0, va3c0);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+        const int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0, va3c0);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+        const int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0, va3c0);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+          vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+          const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+          vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+          const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+          vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+          const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+          vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+          const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+          vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+          const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+          vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+          const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+          vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+          const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+          vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+          const int16x8_t vprod3x89c1 = vmull_s8(vb89c1, va3c1);
+          vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+          const int16x8_t vprod3xABc1 = vmull_s8(vbABc1, va3c1);
+          vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+          const int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1, va3c1);
+          vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+          const int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1, va3c1);
+          vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+        }
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+    int32x4_t vacc3x89AB = vpaddq_s32(vacc3x89, vacc3xAB);
+    int32x4_t vacc3xCDEF = vpaddq_s32(vacc3xCD, vacc3xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+    const int32x2_t vsum3x89 = vpadd_s32(vget_low_s32(vacc3x89), vget_high_s32(vacc3x89));
+    const int32x2_t vsum3xAB = vpadd_s32(vget_low_s32(vacc3xAB), vget_high_s32(vacc3xAB));
+    int32x4_t vacc3x89AB = vcombine_s32(vsum3x89, vsum3xAB);
+    const int32x2_t vsum3xCD = vpadd_s32(vget_low_s32(vacc3xCD), vget_high_s32(vacc3xCD));
+    const int32x2_t vsum3xEF = vpadd_s32(vget_low_s32(vacc3xEF), vget_high_s32(vacc3xEF));
+    int32x4_t vacc3xCDEF = vcombine_s32(vsum3xCD, vsum3xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
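
For reference, the rndnu requantization applied in the kernel above (truncating pre-shift, saturating doubling multiply-high, rounding post-shift, zero-point add, clamp) can be modelled per 32-bit lane by the scalar sketch below. This is illustrative only and is not part of the generated kernel: the helper name and parameter names are hypothetical, the intermediate int16/int8 saturations of the NEON path are folded into the final clamp, and the shift parameters are assumed to be stored negated so that the NEON shift-left intrinsics perform right shifts.

    #include <stdint.h>

    // Illustrative scalar model of one lane of the rndnu requantization
    // (hypothetical helper; saturation of vqdmulhq_s32 at INT32_MIN*INT32_MIN omitted).
    static inline int8_t rndnu_requantize_scalar(
        int32_t acc,
        int32_t right_pre_shift,   // negative, as loaded from params->rndnu_neon
        int32_t multiplier,
        int32_t right_post_shift,  // negative, as loaded from params->rndnu_neon
        int16_t output_zero_point,
        int8_t output_min,
        int8_t output_max)
    {
      // vshlq_s32 with a negative count: truncating arithmetic right shift.
      acc = acc >> (-right_pre_shift);

      // vqdmulhq_s32: high 32 bits of the doubled 64-bit product.
      const int64_t product = (int64_t) acc * (int64_t) multiplier;
      acc = (int32_t) (product >> 31);

      // vrshlq_s32 with a negative count: rounding arithmetic right shift.
      const int32_t post = -right_post_shift;
      if (post > 0) {
        acc = (int32_t) (((int64_t) acc + (INT64_C(1) << (post - 1))) >> post);
      }

      // Zero-point add followed by clamping to the signed 8-bit output range.
      int32_t out = acc + (int32_t) output_zero_point;
      if (out < (int32_t) output_min) out = (int32_t) output_min;
      if (out > (int32_t) output_max) out = (int32_t) output_max;
      return (int8_t) out;
    }

In the NEON code this pipeline appears as vshlq_s32 / vqdmulhq_s32 / vrshlq_s32 over whole int32x4_t accumulators, followed by vqmovn/vqadd narrowing and vmaxq_s8/vminq_s8 clamping.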
diff --git a/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..23c8238
--- /dev/null
+++ b/src/qs8-igemm/gen/4x16c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,658 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x89 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xAB = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xCD = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0xEF = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc1x89 = vacc0x89;
+    int32x4_t vacc1xAB = vacc0xAB;
+    int32x4_t vacc1xCD = vacc0xCD;
+    int32x4_t vacc1xEF = vacc0xEF;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc2x89 = vacc0x89;
+    int32x4_t vacc2xAB = vacc0xAB;
+    int32x4_t vacc2xCD = vacc0xCD;
+    int32x4_t vacc2xEF = vacc0xEF;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+    int32x4_t vacc3x89 = vacc0x89;
+    int32x4_t vacc3xAB = vacc0xAB;
+    int32x4_t vacc3xCD = vacc0xCD;
+    int32x4_t vacc3xEF = vacc0xEF;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
+        int16x8_t vprod1x89c0 = vmull_s8(vb89c0x0, va1x0);
+        int16x8_t vprod2x89c0 = vmull_s8(vb89c0x0, va2x0);
+        int16x8_t vprod3x89c0 = vmull_s8(vb89c0x0, va3x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+        int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
+        int16x8_t vprod1xABc0 = vmull_s8(vbABc0x0, va1x0);
+        int16x8_t vprod2xABc0 = vmull_s8(vbABc0x0, va2x0);
+        int16x8_t vprod3xABc0 = vmull_s8(vbABc0x0, va3x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+        int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
+        int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0x0, va1x0);
+        int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0x0, va2x0);
+        int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0x0, va3x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+        int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
+        int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0x0, va1x0);
+        int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0x0, va2x0);
+        int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0x0, va3x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
+        int16x8_t vprod1x89c1 = vmull_s8(vb89c1x0, va1x0);
+        int16x8_t vprod2x89c1 = vmull_s8(vb89c1x0, va2x0);
+        int16x8_t vprod3x89c1 = vmull_s8(vb89c1x0, va3x0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+        int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
+        int16x8_t vprod1xABc1 = vmull_s8(vbABc1x0, va1x0);
+        int16x8_t vprod2xABc1 = vmull_s8(vbABc1x0, va2x0);
+        int16x8_t vprod3xABc1 = vmull_s8(vbABc1x0, va3x0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+        int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
+        int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1x0, va1x0);
+        int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1x0, va2x0);
+        int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1x0, va3x0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+        int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
+        int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1x0, va1x0);
+        int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1x0, va2x0);
+        int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1x0, va3x0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
+        vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
+        const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
+        vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
+        const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
+        vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
+        const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
+        vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int16x8_t vprod1x89c0 = vmull_s8(vb89c0, va1c0);
+        vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c0);
+        const int16x8_t vprod1xABc0 = vmull_s8(vbABc0, va1c0);
+        vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc0);
+        const int16x8_t vprod1xCDc0 = vmull_s8(vbCDc0, va1c0);
+        vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc0);
+        const int16x8_t vprod1xEFc0 = vmull_s8(vbEFc0, va1c0);
+        vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int16x8_t vprod2x89c0 = vmull_s8(vb89c0, va2c0);
+        vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c0);
+        const int16x8_t vprod2xABc0 = vmull_s8(vbABc0, va2c0);
+        vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc0);
+        const int16x8_t vprod2xCDc0 = vmull_s8(vbCDc0, va2c0);
+        vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc0);
+        const int16x8_t vprod2xEFc0 = vmull_s8(vbEFc0, va2c0);
+        vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc0);
+        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        const int16x8_t vprod3x89c0 = vmull_s8(vb89c0, va3c0);
+        vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c0);
+        const int16x8_t vprod3xABc0 = vmull_s8(vbABc0, va3c0);
+        vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc0);
+        const int16x8_t vprod3xCDc0 = vmull_s8(vbCDc0, va3c0);
+        vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc0);
+        const int16x8_t vprod3xEFc0 = vmull_s8(vbEFc0, va3c0);
+        vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
+          vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
+          const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
+          vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
+          const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
+          vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
+          const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
+          vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int16x8_t vprod1x89c1 = vmull_s8(vb89c1, va1c1);
+          vacc1x89 = vpadalq_s16(vacc1x89, vprod1x89c1);
+          const int16x8_t vprod1xABc1 = vmull_s8(vbABc1, va1c1);
+          vacc1xAB = vpadalq_s16(vacc1xAB, vprod1xABc1);
+          const int16x8_t vprod1xCDc1 = vmull_s8(vbCDc1, va1c1);
+          vacc1xCD = vpadalq_s16(vacc1xCD, vprod1xCDc1);
+          const int16x8_t vprod1xEFc1 = vmull_s8(vbEFc1, va1c1);
+          vacc1xEF = vpadalq_s16(vacc1xEF, vprod1xEFc1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int16x8_t vprod2x89c1 = vmull_s8(vb89c1, va2c1);
+          vacc2x89 = vpadalq_s16(vacc2x89, vprod2x89c1);
+          const int16x8_t vprod2xABc1 = vmull_s8(vbABc1, va2c1);
+          vacc2xAB = vpadalq_s16(vacc2xAB, vprod2xABc1);
+          const int16x8_t vprod2xCDc1 = vmull_s8(vbCDc1, va2c1);
+          vacc2xCD = vpadalq_s16(vacc2xCD, vprod2xCDc1);
+          const int16x8_t vprod2xEFc1 = vmull_s8(vbEFc1, va2c1);
+          vacc2xEF = vpadalq_s16(vacc2xEF, vprod2xEFc1);
+          const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+          const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+          vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+          const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+          vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+          const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+          vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+          const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+          vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+          const int16x8_t vprod3x89c1 = vmull_s8(vb89c1, va3c1);
+          vacc3x89 = vpadalq_s16(vacc3x89, vprod3x89c1);
+          const int16x8_t vprod3xABc1 = vmull_s8(vbABc1, va3c1);
+          vacc3xAB = vpadalq_s16(vacc3xAB, vprod3xABc1);
+          const int16x8_t vprod3xCDc1 = vmull_s8(vbCDc1, va3c1);
+          vacc3xCD = vpadalq_s16(vacc3xCD, vprod3xCDc1);
+          const int16x8_t vprod3xEFc1 = vmull_s8(vbEFc1, va3c1);
+          vacc3xEF = vpadalq_s16(vacc3xEF, vprod3xEFc1);
+        }
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
+    int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc1x89AB = vpaddq_s32(vacc1x89, vacc1xAB);
+    int32x4_t vacc1xCDEF = vpaddq_s32(vacc1xCD, vacc1xEF);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc2x89AB = vpaddq_s32(vacc2x89, vacc2xAB);
+    int32x4_t vacc2xCDEF = vpaddq_s32(vacc2xCD, vacc2xEF);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+    int32x4_t vacc3x89AB = vpaddq_s32(vacc3x89, vacc3xAB);
+    int32x4_t vacc3xCDEF = vpaddq_s32(vacc3xCD, vacc3xEF);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
+    const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
+    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
+    const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
+    const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
+    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum1x89 = vpadd_s32(vget_low_s32(vacc1x89), vget_high_s32(vacc1x89));
+    const int32x2_t vsum1xAB = vpadd_s32(vget_low_s32(vacc1xAB), vget_high_s32(vacc1xAB));
+    int32x4_t vacc1x89AB = vcombine_s32(vsum1x89, vsum1xAB);
+    const int32x2_t vsum1xCD = vpadd_s32(vget_low_s32(vacc1xCD), vget_high_s32(vacc1xCD));
+    const int32x2_t vsum1xEF = vpadd_s32(vget_low_s32(vacc1xEF), vget_high_s32(vacc1xEF));
+    int32x4_t vacc1xCDEF = vcombine_s32(vsum1xCD, vsum1xEF);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum2x89 = vpadd_s32(vget_low_s32(vacc2x89), vget_high_s32(vacc2x89));
+    const int32x2_t vsum2xAB = vpadd_s32(vget_low_s32(vacc2xAB), vget_high_s32(vacc2xAB));
+    int32x4_t vacc2x89AB = vcombine_s32(vsum2x89, vsum2xAB);
+    const int32x2_t vsum2xCD = vpadd_s32(vget_low_s32(vacc2xCD), vget_high_s32(vacc2xCD));
+    const int32x2_t vsum2xEF = vpadd_s32(vget_low_s32(vacc2xEF), vget_high_s32(vacc2xEF));
+    int32x4_t vacc2xCDEF = vcombine_s32(vsum2xCD, vsum2xEF);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+    const int32x2_t vsum3x89 = vpadd_s32(vget_low_s32(vacc3x89), vget_high_s32(vacc3x89));
+    const int32x2_t vsum3xAB = vpadd_s32(vget_low_s32(vacc3xAB), vget_high_s32(vacc3xAB));
+    int32x4_t vacc3x89AB = vcombine_s32(vsum3x89, vsum3xAB);
+    const int32x2_t vsum3xCD = vpadd_s32(vget_low_s32(vacc3xCD), vget_high_s32(vacc3xCD));
+    const int32x2_t vsum3xEF = vpadd_s32(vget_low_s32(vacc3xEF), vget_high_s32(vacc3xEF));
+    int32x4_t vacc3xCDEF = vcombine_s32(vsum3xCD, vsum3xEF);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
+    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
+    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
+    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
+    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
+    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
+    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
+    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
+    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
+    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
+    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
+    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
+    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
+    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
+    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
+    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
+
+    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
+    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
+    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
+    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
+    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
+    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
+    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
+
+    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
+    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
+    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
+    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
+
+    if (nc >= 16) {
+      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
+      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
+      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
+      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 16;
+    } else {
+      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
+      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
+      if (nc & 8) {
+        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
+        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
+        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
+        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
+        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
+        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
+      }
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c b/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
new file mode 100644
index 0000000..f3c4d82
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mlal-padal.c
@@ -0,0 +1,550 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+      while (k >= 16 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+        int8x8_t va3x1 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
+        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
+        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
+        vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
+        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
+        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
+        vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
+        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
+        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
+        vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
+        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
+        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
+        vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
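+        // Rotate each A vector by 4 bytes (the "s2" shuffle in c4s2) so the
+        // other 4-channel half of the 8-byte block multiplies against the c1
+        // weight vectors loaded above.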
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va0x1 = vext_s8(va0x1, va0x1, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va1x1 = vext_s8(va1x1, va1x1, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va2x1 = vext_s8(va2x1, va2x1, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        va3x1 = vext_s8(va3x1, va3x1, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
+        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
+        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
+        vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
+        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
+        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
+        vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
+        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
+        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
+        vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
+        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
+        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
+        vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+        k -= 16 * sizeof(int8_t);
+      }
+
+      if (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
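+      // Remainder of K (1-7 bytes): broadcast the first 4-byte group of A and
+      // multiply it against the c0 weights; the c1 weights are only needed
+      // when more than 4 bytes remain.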
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+          const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+          vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+          const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+          vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+          const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+          vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+          const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+          vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        }
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
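+    // Reduce the paired column accumulators (vaccMx01 ... vaccMx67) into one
+    // int32 sum per output column: AArch64 uses VPADDQ directly, while
+    // AArch32 NEON pairs VPADD with VCOMBINE.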
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+#endif
+
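+    // rndnu requantization: arithmetic pre-shift, saturating doubling
+    // multiply-high by the fixed-point multiplier, and rounding post-shift
+    // (both shifts use VSHL/VRSHL with right shifts encoded as negative
+    // amounts), then add the output zero point, narrow with saturation,
+    // and clamp to [output_min, output_max].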
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c b/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
new file mode 100644
index 0000000..e7f6303
--- /dev/null
+++ b/src/qs8-igemm/gen/4x8c4s2-minmax-rndnu-neon-mull-padal.c
@@ -0,0 +1,416 @@
+// Auto-generated file. Do not edit!
+//   Template: src/qs8-igemm/c4-neon-mull-padal-shuffle.c.in
+//   Generator: tools/xngen
+//
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <arm_neon.h>
+
+#include <xnnpack/gemm.h>
+#include <xnnpack/math.h>
+
+
+void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal(
+    size_t mr,
+    size_t nc,
+    size_t kc,
+    size_t ks,
+    const int8_t** restrict a,
+    const void* restrict w,
+    int8_t* restrict c,
+    size_t cm_stride,
+    size_t cn_stride,
+    size_t a_offset,
+    const int8_t* zero,
+    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+{
+  assert(mr != 0);
+  assert(mr <= 4);
+  assert(nc != 0);
+  assert(kc != 0);
+  assert(ks != 0);
+  assert(ks % (4 * sizeof(void*)) == 0);
+  assert(a_offset % sizeof(int8_t) == 0);
+  assert(a != NULL);
+  assert(w != NULL);
+  assert(c != NULL);
+
+  int8_t* c0 = c;
+  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
+  if XNN_UNPREDICTABLE(mr < 2) {
+    c1 = c0;
+  }
+  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
+  if XNN_UNPREDICTABLE(mr <= 2) {
+    c2 = c1;
+  }
+  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
+  if XNN_UNPREDICTABLE(mr != 4) {
+    c3 = c2;
+  }
+
+  do {
+    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vshll_n_u32(vld1_u32(w), 0)); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
+    int32x4_t vacc1x01 = vacc0x01;
+    int32x4_t vacc1x23 = vacc0x23;
+    int32x4_t vacc1x45 = vacc0x45;
+    int32x4_t vacc1x67 = vacc0x67;
+    int32x4_t vacc2x01 = vacc0x01;
+    int32x4_t vacc2x23 = vacc0x23;
+    int32x4_t vacc2x45 = vacc0x45;
+    int32x4_t vacc2x67 = vacc0x67;
+    int32x4_t vacc3x01 = vacc0x01;
+    int32x4_t vacc3x23 = vacc0x23;
+    int32x4_t vacc3x45 = vacc0x45;
+    int32x4_t vacc3x67 = vacc0x67;
+
+    size_t p = ks;
+    do {
+      const int8_t* restrict a0 = a[0];
+      if XNN_UNPREDICTABLE(a0 != zero) {
+        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
+      }
+      const int8_t* restrict a1 = a[1];
+      if XNN_UNPREDICTABLE(a1 != zero) {
+        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
+      }
+      const int8_t* restrict a2 = a[2];
+      if XNN_UNPREDICTABLE(a2 != zero) {
+        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
+      }
+      const int8_t* restrict a3 = a[3];
+      if XNN_UNPREDICTABLE(a3 != zero) {
+        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
+      }
+      a += 4;
+
+      size_t k = kc;
+
+
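+      // MULL-only variant: each pass consumes 8 bytes of A per row with VMULL
+      // (no second VMLAL pass), so the main loop advances K by 8.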
+      while (k >= 8 * sizeof(int8_t)) {
+        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
+        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
+        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
+        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
+
+        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
+        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
+        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
+        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
+        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
+        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
+        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
+        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
+        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
+        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
+        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
+        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
+        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+        va0x0 = vext_s8(va0x0, va0x0, 4);
+        va1x0 = vext_s8(va1x0, va1x0, 4);
+        va2x0 = vext_s8(va2x0, va2x0, 4);
+        va3x0 = vext_s8(va3x0, va3x0, 4);
+        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
+        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
+        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
+        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
+        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
+        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
+        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
+        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
+        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
+        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
+        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
+        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
+        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+
+        k -= 8 * sizeof(int8_t);
+      }
+
+      if XNN_UNLIKELY(k != 0) {
+        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
+        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
+        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
+        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
+
+        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
+        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
+        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
+        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
+        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
+        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
+        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
+        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
+        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
+        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
+        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
+        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
+        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
+        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
+        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
+        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
+        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
+        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
+        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
+        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
+        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
+        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
+        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
+        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
+        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
+        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
+        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
+        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
+        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
+        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
+        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
+        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
+        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
+        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
+        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
+        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
+
+        if (k > 4 * sizeof(int8_t)) {
+          const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+          const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
+
+          const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
+          const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
+          vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
+          const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
+          vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
+          const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
+          vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
+          const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
+          vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
+          const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
+          const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
+          vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
+          const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
+          vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
+          const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
+          vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
+          const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
+          vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
+          const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
+          const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
+          vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
+          const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
+          vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
+          const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
+          vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
+          const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
+          vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
+          const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));
+          const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
+          vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
+          const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
+          vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
+          const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
+          vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
+          const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
+          vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
+        }
+      }
+
+      p -= 4 * sizeof(void*);
+    } while (p != 0);
+
+#if XNN_ARCH_ARM64
+    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
+    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
+    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
+    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
+    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
+    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
+    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
+    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
+#else
+    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
+    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
+    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
+    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
+    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
+    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
+    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
+    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
+    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
+    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
+    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
+    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
+    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
+    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
+    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
+    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
+    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
+    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
+    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
+    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
+    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
+    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
+    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
+    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
+#endif
+
+    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
+    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
+    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
+
+    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
+    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
+    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
+    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
+    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
+    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
+    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
+    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
+
+    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
+    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
+    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
+    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
+    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
+    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
+    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
+    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
+
+    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
+    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
+    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
+    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
+    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
+    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
+    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
+    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
+
+    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
+#if XNN_ARCH_ARM64
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
+    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
+#else
+    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
+    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
+    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
+    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
+
+    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
+    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
+#endif
+    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
+    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
+
+    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
+    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
+
+    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
+    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
+
+    if (nc >= 8) {
+      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
+      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
+      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
+      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
+
+      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
+      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
+      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
+      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
+
+      a = (const int8_t**restrict) ((uintptr_t) a - ks);
+
+      nc -= 8;
+    } else {
+      if (nc & 4) {
+        vst1q_lane_u32(__builtin_assume_aligned(c3, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
+        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
+      }
+      if (nc & 2) {
+        vst1q_lane_u16(__builtin_assume_aligned(c3, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
+        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
+        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
+        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
+      }
+      if (nc & 1) {
+        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
+        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
+        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
+        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
+      }
+
+      nc = 0;
+    }
+  } while (nc != 0);
+}
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index 85ca07b..1f4b5c9 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -751,6 +751,30 @@
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4__neon_mlal_padal_dup)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4__neon_mlal_padal_dup)
 
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal)
+
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal)
+DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal)
+
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2__neon_mull_padal_dup)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c2__neon_mull_padal_dup)
 DECLARE_QS8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_padal_dup)
@@ -1168,6 +1192,12 @@
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_padal_dup)
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4__neonv8_mlal_padal_dup)
 
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal)
+
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal)
+DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal)
+
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2__neon_mlal_padal_dup)
 DECLARE_QC8_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c2__neon_mlal_padal_dup)
 
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index f05781d..55e6c4a 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -577,6 +577,30 @@
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4__neon_mlal_padal_dup)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4__neon_mlal_padal_dup)
 
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal)
+
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal)
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal)
+
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2__neon_mull_padal_dup)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c2__neon_mull_padal_dup)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_padal_dup)
@@ -925,6 +949,12 @@
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_padal_dup)
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4__neonv8_mlal_padal_dup)
 
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal)
+
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal)
+DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal)
+
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_padal_dup)
 DECLARE_QC8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_padal_dup)
 
diff --git a/test/qc8-gemm-minmax-fp32.cc b/test/qc8-gemm-minmax-fp32.cc
index f82e4f7..1071145 100644
--- a/test/qc8-gemm-minmax-fp32.cc
+++ b/test/qc8-gemm-minmax-fp32.cc
@@ -23,6 +23,1830 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QC8_GEMM_MINMAX_FP32_1X8C2S4__NEON_MLAL_PADAL, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qc8-gemm-minmax-fp32.yaml b/test/qc8-gemm-minmax-fp32.yaml
index b226618..638306e 100644
--- a/test/qc8-gemm-minmax-fp32.yaml
+++ b/test/qc8-gemm-minmax-fp32.yaml
@@ -3,6 +3,18 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
 - name: xnn_qc8_gemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal_padal
   init: xnn_init_qs8_minmax_neon_fp32_params
   k-block: 16
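
The four YAML entries added above follow this file's existing convention: "name" selects the microkernel symbol under test, "init" names the parameter-initialization function that the generated tests pass to GemmMicrokernelTester, and "k-block: 16" is the accumulation block size from which the test generator derives the k_eq_16 / k_lt_16 / k_gt_16 / k_div_16 loop bounds visible in the .cc hunks. As a minimal sketch of what one entry expands to (assuming the gemm-microkernel-tester.h header and the TEST_REQUIRES_ARM_NEON macro used by the existing generated tests), a single case for the new 1x8 c4s2 NEON kernel (mr=1, nr=8, kr=4, sr=2) looks like:

  #include <gtest/gtest.h>

  #include "gemm-microkernel-tester.h"  // assumed helper header, as in the existing generated tests

  TEST(QC8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_sketch) {
    TEST_REQUIRES_ARM_NEON;  // skip on hardware without ARM NEON
    GemmMicrokernelTester()
      .mr(1)   // rows of output the kernel produces per call
      .nr(8)   // columns of output the kernel produces per call
      .kr(4)   // c4: channel block of 4 consumed per accumulation step
      .sr(2)   // s2: shuffle factor of the c4s2 weight layout
      .m(1)
      .n(8)
      .k(16)   // exactly one k-block, matching "k-block: 16" above
      .Test(xnn_qc8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal,
            xnn_init_qs8_minmax_neon_fp32_params,
            xnn_init_qs8_requantization_fp32_params,
            xnn_qs8_requantize_fp32);
  }
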
diff --git a/test/qc8-igemm-minmax-fp32.cc b/test/qc8-igemm-minmax-fp32.cc
index 8565cd6..cd5f354 100644
--- a/test/qc8-igemm-minmax-fp32.cc
+++ b/test/qc8-igemm-minmax-fp32.cc
@@ -23,6 +23,1878 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_minmax_neon_fp32_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QC8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_minmax_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QC8_IGEMM_MINMAX_FP32_1X8C2S4__NEON_MLAL_PADAL, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qc8-igemm-minmax-fp32.yaml b/test/qc8-igemm-minmax-fp32.yaml
index b300995..c4d254c 100644
--- a/test/qc8-igemm-minmax-fp32.yaml
+++ b/test/qc8-igemm-minmax-fp32.yaml
@@ -3,6 +3,18 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_minmax_neon_fp32_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
+- name: xnn_qc8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_minmax_neon_params
+  k-block: 16
 - name: xnn_qc8_igemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal_padal
   init: xnn_init_qs8_minmax_neon_fp32_params
   k-block: 16
diff --git a/test/qs8-gemm-minmax-fp32.cc b/test/qs8-gemm-minmax-fp32.cc
index 217e951..6034268 100644
--- a/test/qs8-gemm-minmax-fp32.cc
+++ b/test/qs8-gemm-minmax-fp32.cc
@@ -23,6 +23,1830 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_GEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_GEMM_MINMAX_FP32_1X8C2S4__NEON_MLAL_PADAL, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-gemm-minmax-fp32.yaml b/test/qs8-gemm-minmax-fp32.yaml
index e14b172..25fd06b 100644
--- a/test/qs8-gemm-minmax-fp32.yaml
+++ b/test/qs8-gemm-minmax-fp32.yaml
@@ -3,6 +3,18 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
 - name: xnn_qs8_gemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal_padal
   init: xnn_init_qs8_conv_minmax_fp32_neon_params
   k-block: 16
diff --git a/test/qs8-gemm-minmax-rndnu.cc b/test/qs8-gemm-minmax-rndnu.cc
index 9537fc4..9c24f56 100644
--- a/test/qs8-gemm-minmax-rndnu.cc
+++ b/test/qs8-gemm-minmax-rndnu.cc
@@ -23,6 +23,7302 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .a_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_lt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(11)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(83)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(43)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
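+// A brief orientation comment, summarizing only what the surrounding generated
+// tests already show: in GemmMicrokernelTester, mr/nr set the microkernel tile,
+// kr(4)/sr(2) mirror the "c4s2" suffix of the kernel name, m/n/k give the
+// problem size actually exercised, a_stride/cn_stride/cm_stride cover
+// non-contiguous A and C layouts, and qmin/qmax cover requantized output
+// clamping.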
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .a_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_lt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(19)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(37)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .a_stride(163)
+        .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .a_stride(83)
+          .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_GEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_GEMM_MINMAX_RNDNU_1X8C2S4__NEON_MULL_PADAL, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-gemm-minmax-rndnu.yaml b/test/qs8-gemm-minmax-rndnu.yaml
index dc61434..9427b75 100644
--- a/test/qs8-gemm-minmax-rndnu.yaml
+++ b/test/qs8-gemm-minmax-rndnu.yaml
@@ -3,6 +3,54 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_gemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
 - name: xnn_qs8_gemm_minmax_rndnu_ukernel_1x8c2s4__neon_mull_padal
   init: xnn_init_qs8_conv_minmax_rndnu_neon_params
   k-block: 8
diff --git a/test/qs8-igemm-minmax-fp32.cc b/test/qs8-igemm-minmax-fp32.cc
index 80d01e0..78f69ec 100644
--- a/test/qs8-igemm-minmax-fp32.cc
+++ b/test/qs8-igemm-minmax-fp32.cc
@@ -23,6 +23,1878 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neon_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_1X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_FP32_2X8C4S2__NEONV8_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_V8;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal, xnn_init_qs8_conv_minmax_fp32_neonv8_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_IGEMM_MINMAX_FP32_1X8C2S4__NEON_MLAL_PADAL, k_eq_16) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-igemm-minmax-fp32.yaml b/test/qs8-igemm-minmax-fp32.yaml
index 1024cef..aabba65 100644
--- a/test/qs8-igemm-minmax-fp32.yaml
+++ b/test/qs8-igemm-minmax-fp32.yaml
@@ -3,6 +3,18 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_fp32_ukernel_2x8c4s2__neonv8_mlal_padal
+  init: xnn_init_qs8_conv_minmax_fp32_neonv8_params
+  k-block: 16
 - name: xnn_qs8_igemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal_padal
   init: xnn_init_qs8_conv_minmax_fp32_neon_params
   k-block: 16
diff --git a/test/qs8-igemm-minmax-rndnu.cc b/test/qs8-igemm-minmax-rndnu.cc
index 8378366..c9e3a9f 100644
--- a/test/qs8-igemm-minmax-rndnu.cc
+++ b/test/qs8-igemm-minmax-rndnu.cc
@@ -23,6 +23,7494 @@
 
 
 #if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(8)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
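
The a_offset and zero tests in each IGEMM suite cover the indirection-buffer path: the kernel reads A rows through a pointer array, and an entry equal to the designated zero pointer is used as-is (it points at a shared zero buffer for padded rows), while every other entry is displaced by a_offset bytes. A hypothetical sketch of that addressing rule, under those assumptions and not XNNPACK's actual code:

    // Hypothetical helper illustrating the pointer rule that the a_offset and
    // zero_index tests exercise; resolve_igemm_row is a made-up name.
    #include <cstdint>
    #include <cstddef>

    static inline const int8_t* resolve_igemm_row(
        const int8_t* indirect_ptr,   // entry from the indirection buffer
        const int8_t* zero,           // shared zero buffer for padded rows
        size_t a_offset) {            // byte offset applied to real rows only
      return indirect_ptr == zero
          ? indirect_ptr
          : (const int8_t*) ((uintptr_t) indirect_ptr + a_offset);
    }
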
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(43)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(43)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
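+// Annotation: the group below covers the rndnu IGEMM 3x16c4s2 MULL microkernel,
+// a 3-row by 16-column tile with kr=4/sr=2 packing, swept over the same k, n,
+// stride, small-kernel (ks), a_offset and zero-index cases as the 2x16 group above.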
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(127)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(127)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
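+// Annotation: same sweep for the 4x16c4s2 MULL variant (mr=4, m up to 4). The
+// a_offset used in the indirect-input cases scales with the row count
+// (83/127/163 for mr=2/3/4), presumably to stay past the packed input for the
+// largest k in the sweep.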
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MULL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
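+// Annotation: the remaining groups switch to the MLAL variants on an 8-column
+// tile. Their base case is k_eq_16 rather than k_eq_8, consistent with an MLAL
+// main loop that appears to consume 16 k values per pass (vs. 8 for MULL).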
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
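+// Annotation: 2x8c4s2 MLAL group, repeating the single-row MLAL parameter
+// sweep with mr=2 and m iterating up to 2 in the subtile cases.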
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cn_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 8; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(8)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 8; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_gt_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, n_div_8_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 8; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(8)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(11)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(8)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(8)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(8)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X8C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(8)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(8)
+      .k(16)
+      .cm_stride(11)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 1; m++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 1; m++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 1; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(1)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(1)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(1)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(83)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 1; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(1)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(83)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_1X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(1)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(1)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 2; m++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 2; m++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 2; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(2)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(2)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(2)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 2; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(2)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(2)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_2X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(2)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(2)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 3; m++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 3; m++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 3; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(3)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(3)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(3)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(251)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 3; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(3)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(3)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(251)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_3X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(3)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(3)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(m)
+          .n(n)
+          .k(16)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_m) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(m)
+        .n(16)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_eq_16_subtile_n) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(n)
+        .k(16)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_lt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_lt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 17; k < 32; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, k_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 32; k <= 160; k += 16) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(2)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, a_offset) {
+    TEST_REQUIRES_ARM_NEON;
+    for (size_t k = 1; k <= 80; k += 17) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(2)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(331)
+        .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, zero) {
+    TEST_REQUIRES_ARM_NEON;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 80; k += 17) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(2)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(331)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, qmin) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, qmax) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_RNDNU_4X16C4S2__NEON_MLAL_PADAL, strided_cm) {
+    TEST_REQUIRES_ARM_NEON;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(2)
+      .m(4)
+      .n(16)
+      .k(16)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
+  }
+#endif  // XNN_ARCH_ARM || XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM || XNN_ARCH_ARM64
   TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C2S4__NEON_MULL_PADAL, k_eq_8) {
     TEST_REQUIRES_ARM_NEON;
     GemmMicrokernelTester()
diff --git a/test/qs8-igemm-minmax-rndnu.yaml b/test/qs8-igemm-minmax-rndnu.yaml
index 53695f7..a4fe4d07 100644
--- a/test/qs8-igemm-minmax-rndnu.yaml
+++ b/test/qs8-igemm-minmax-rndnu.yaml
@@ -3,6 +3,54 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mull_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 8
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_2x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
+- name: xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4s2__neon_mlal_padal
+  init: xnn_init_qs8_conv_minmax_rndnu_neon_params
+  k-block: 16
 - name: xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c2s4__neon_mull_padal
   init: xnn_init_qs8_conv_minmax_rndnu_neon_params
   k-block: 8
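
Note: the YAML entries above only register the kernel name, the init function, and the k-block that the test generator sweeps around; the C++ test cases are emitted from them in the same shape as the blocks earlier in this patch. As an illustration only (assuming the same generator template as the 2x16/3x16/4x16 tests shown above; the actual generated file is part of this change), the k_eq_8 case for the first new mull entry would look like:

  TEST(QS8_IGEMM_MINMAX_RNDNU_1X8C4S2__NEON_MULL_PADAL, k_eq_8) {
    TEST_REQUIRES_ARM_NEON;
    GemmMicrokernelTester()
      .mr(1)   // 1x8 tile: MR=1 rows
      .nr(8)   // NR=8 columns
      .kr(4)   // c4s2 in the kernel name: KR=4
      .sr(2)   // c4s2 in the kernel name: SR=2
      .m(1)
      .n(8)
      .k(8)    // matches "k-block: 8" for the mull variant; the mlal variants use 16
      .Test(xnn_qs8_igemm_minmax_rndnu_ukernel_1x8c4s2__neon_mull_padal, xnn_init_qs8_conv_minmax_rndnu_neon_params, xnn_init_qs8_requantization_rndnu_params, xnn_qs8_requantize_rndnu);
  }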