QS8 4x16c4-aarch64-neondot-ld64 IGEMM microkernel

PiperOrigin-RevId: 362158244
diff --git a/BUILD.bazel b/BUILD.bazel
index e6b902a..2a5e902 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1700,112 +1700,112 @@
     "src/qs8-gemm/gen/1x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/1x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x8c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/1x8c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x16-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/1x16-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/1x16c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/1x16c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/1x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/1x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x8-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/2x8-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/2x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x8c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/2x8c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x16-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/2x16-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/2x16c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/2x16c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/2x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/2x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x8-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/3x8-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/3x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/3x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x8c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/3x8c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x16-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/3x16-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/3x16c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/3x16c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/3x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/3x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x8-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/4x8-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/4x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/4x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x8c8-minmax-neon-mull-padal.c",
+    "src/qs8-gemm/gen/4x8c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x16-minmax-neon-mlal-lane.c",
     "src/qs8-gemm/gen/4x16-minmax-neon-mull-addw-dup.c",
     "src/qs8-gemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c",
     "src/qs8-gemm/gen/4x16c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-gemm/gen/4x16c8-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x16c8-minmax-neon-mull-padal.c",
-    "src/qs8-gemm/gen/1x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/2x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/3x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/4x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/1x16c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/2x16c16-minmax-neon-mlal-padal.c",
-    "src/qs8-gemm/gen/3x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-gemm/gen/4x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/1x8-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/1x16-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/2x8-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/2x16-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/3x8-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/3x16-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/4x8-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/4x16-minmax-neon-mlal-lane.c",
-    "src/qs8-igemm/gen/1x16c8-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/1x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/1x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/1x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-igemm/gen/1x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/1x8c8-minmax-neon-mull-padal.c",
-    "src/qs8-igemm/gen/2x16c8-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/2x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/1x8c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/1x16-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/1x16-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/1x16c2-minmax-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/1x16c8-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/1x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/1x16c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x8-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/2x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/2x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-igemm/gen/2x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/2x8c8-minmax-neon-mull-padal.c",
-    "src/qs8-igemm/gen/3x16c8-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/3x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/2x8c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x16-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/2x16-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/2x16c2-minmax-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/2x16c8-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/2x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/2x16c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/3x8-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/3x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/3x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-igemm/gen/3x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/3x8c8-minmax-neon-mull-padal.c",
-    "src/qs8-igemm/gen/4x16c8-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/4x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/3x8c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/3x16-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/3x16-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/3x16c2-minmax-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/3x16c8-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/3x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/3x16c16-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/4x8-minmax-neon-mlal-lane.c",
+    "src/qs8-igemm/gen/4x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/4x8c2-minmax-neon-mull-padal-dup.c",
     "src/qs8-igemm/gen/4x8c8-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/4x8c8-minmax-neon-mull-padal.c",
-    "src/qs8-igemm/gen/1x16c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/1x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/2x16c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/2x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/3x16c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/3x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/4x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-igemm/gen/4x8c16-minmax-neon-mlal-padal.c",
-    "src/qs8-igemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/1x8c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c",
-    "src/qs8-igemm/gen/1x16c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/1x8c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/2x16c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/2x8c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/3x16c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/3x8c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/4x16c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/4x8c2-minmax-neon-mull-padal-dup.c",
-    "src/qs8-igemm/gen/1x16-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/1x8-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/2x16-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/2x8-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/3x16-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/3x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/4x16-minmax-neon-mlal-lane.c",
     "src/qs8-igemm/gen/4x16-minmax-neon-mull-addw-dup.c",
-    "src/qs8-igemm/gen/4x8-minmax-neon-mull-addw-dup.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c",
+    "src/qs8-igemm/gen/4x16c2-minmax-neon-mull-padal-dup.c",
+    "src/qs8-igemm/gen/4x16c8-minmax-neon-mlal-padal.c",
+    "src/qs8-igemm/gen/4x16c8-minmax-neon-mull-padal.c",
+    "src/qs8-igemm/gen/4x16c16-minmax-neon-mlal-padal.c",
     "src/qs8-requantization/fp32-neon.c",
     "src/qs8-requantization/precise-neon.c",
     "src/qs8-requantization/q31-neon.c",
@@ -3564,11 +3564,12 @@
     "src/f32-igemm/gen/6x8-minmax-aarch64-neonfma-cortex-a75.S",
     "src/qs8-gemm/1x16c4-aarch64-neondot-ld32.S",
     "src/qs8-gemm/1x16c4-aarch64-neondot-ld64.S",
-    "src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S",
     "src/qs8-gemm/2x8c16-aarch64-neon-mlal-padal.S",
-    "src/qs8-gemm/4x16c4-aarch64-neondot-ld64.S",
+    "src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S",
     "src/qs8-gemm/4x16c4-aarch64-neondot-ld32.S",
+    "src/qs8-gemm/4x16c4-aarch64-neondot-ld64.S",
     "src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S",
+    "src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S",
 ]
 
 INTERNAL_MICROKERNEL_HDRS = [
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5b65062..28f075e 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -949,112 +949,112 @@
   src/qs8-gemm/gen/1x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/1x8c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/1x8c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/1x8c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/1x16-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/1x16-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/1x16c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/1x16c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/1x16c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/1x16c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/2x8-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/2x8-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/2x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/2x8c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/2x8c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/2x8c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/2x16-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/2x16-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/2x16c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/2x16c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/2x16c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/2x16c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/3x8-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/3x8-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/3x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/3x8c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/3x8c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/3x8c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/3x16-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/3x16-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/3x16c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/3x16c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/3x16c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/3x16c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/4x8-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/4x8-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/4x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/4x8c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/4x8c8-minmax-neon-mull-padal.c
+  src/qs8-gemm/gen/4x8c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/4x16-minmax-neon-mlal-lane.c
   src/qs8-gemm/gen/4x16-minmax-neon-mull-addw-dup.c
   src/qs8-gemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c
   src/qs8-gemm/gen/4x16c2-minmax-neon-mull-padal-dup.c
   src/qs8-gemm/gen/4x16c8-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/4x16c8-minmax-neon-mull-padal.c
-  src/qs8-gemm/gen/1x8c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/2x8c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/3x8c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/4x8c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/1x16c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/2x16c16-minmax-neon-mlal-padal.c
-  src/qs8-gemm/gen/3x16c16-minmax-neon-mlal-padal.c
   src/qs8-gemm/gen/4x16c16-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/1x8-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/1x16-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/2x8-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/2x16-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/3x8-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/3x16-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/4x8-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/4x16-minmax-neon-mlal-lane.c
-  src/qs8-igemm/gen/1x16c8-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/1x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/1x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/1x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-igemm/gen/1x8c8-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/1x8c8-minmax-neon-mull-padal.c
-  src/qs8-igemm/gen/2x16c8-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/2x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/1x8c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/1x16-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/1x16-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/1x16c2-minmax-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/1x16c8-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/1x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/1x16c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x8-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/2x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/2x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-igemm/gen/2x8c8-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/2x8c8-minmax-neon-mull-padal.c
-  src/qs8-igemm/gen/3x16c8-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/3x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/2x8c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x16-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/2x16-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/2x16c2-minmax-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/2x16c8-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/2x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/2x16c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/3x8-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/3x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/3x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-igemm/gen/3x8c8-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/3x8c8-minmax-neon-mull-padal.c
-  src/qs8-igemm/gen/4x16c8-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/4x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/3x8c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/3x16-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/3x16-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/3x16c2-minmax-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/3x16c8-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/3x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/3x16c16-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/4x8-minmax-neon-mlal-lane.c
+  src/qs8-igemm/gen/4x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/4x8c2-minmax-neon-mull-padal-dup.c
   src/qs8-igemm/gen/4x8c8-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/4x8c8-minmax-neon-mull-padal.c
-  src/qs8-igemm/gen/1x16c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/1x8c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/2x16c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/2x8c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/3x16c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/3x8c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/4x16c16-minmax-neon-mlal-padal.c
   src/qs8-igemm/gen/4x8c16-minmax-neon-mlal-padal.c
-  src/qs8-igemm/gen/1x16c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/1x8c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/2x16c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/2x8c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/3x16c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/3x8c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/4x8c2-minmax-neon-mlal-padal-dup.c
-  src/qs8-igemm/gen/1x16c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/1x8c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/2x16c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/2x8c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/3x16c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/3x8c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/4x16c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/4x8c2-minmax-neon-mull-padal-dup.c
-  src/qs8-igemm/gen/1x16-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/1x8-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/2x16-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/2x8-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/3x16-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/3x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/4x16-minmax-neon-mlal-lane.c
   src/qs8-igemm/gen/4x16-minmax-neon-mull-addw-dup.c
-  src/qs8-igemm/gen/4x8-minmax-neon-mull-addw-dup.c
+  src/qs8-igemm/gen/4x16c2-minmax-neon-mlal-padal-dup.c
+  src/qs8-igemm/gen/4x16c2-minmax-neon-mull-padal-dup.c
+  src/qs8-igemm/gen/4x16c8-minmax-neon-mlal-padal.c
+  src/qs8-igemm/gen/4x16c8-minmax-neon-mull-padal.c
+  src/qs8-igemm/gen/4x16c16-minmax-neon-mlal-padal.c
   src/qs8-requantization/fp32-neon.c
   src/qs8-requantization/precise-neon.c
   src/qs8-requantization/q31-neon.c
@@ -2790,11 +2790,12 @@
   src/f32-igemm/gen/6x8-minmax-aarch64-neonfma-cortex-a75.S
   src/qs8-gemm/1x16c4-aarch64-neondot-ld32.S
   src/qs8-gemm/1x16c4-aarch64-neondot-ld64.S
-  src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S
   src/qs8-gemm/2x8c16-aarch64-neon-mlal-padal.S
+  src/qs8-gemm/4x16c4-aarch64-neondot-cortex-a55.S
   src/qs8-gemm/4x16c4-aarch64-neondot-ld32.S
   src/qs8-gemm/4x16c4-aarch64-neondot-ld64.S
-  src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S)
+  src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S
+  src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S)
 
 SET(XNNPACK_MICROKERNEL_SRCS ${XNNPACK_SCALAR_MICROKERNEL_SRCS})
 IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv[5-8]" OR IOS_ARCH MATCHES "^armv7")
diff --git a/bench/qs8-gemm-e2e.cc b/bench/qs8-gemm-e2e.cc
index 4df5faf..98b1eea 100644
--- a/bench/qs8-gemm-e2e.cc
+++ b/bench/qs8-gemm-e2e.cc
@@ -117,7 +117,7 @@
   static void qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64,
-      xnn_qs8_igemm_minmax_ukernel_4x16c4__neondot,
+      xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64,
       xnn_qs8_gemm_minmax_ukernel_1x16c4__aarch64_neondot_ld64,
       xnn_qs8_igemm_minmax_ukernel_1x16c4__neondot,
       4 /* mr */, 16 /* nr */, 2 /* log2_kr */, 0 /* log2_sr */,
diff --git a/src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S b/src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S
index 16e4893..a4809e7 100644
--- a/src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S
+++ b/src/qs8-igemm/4x16c4-aarch64-neondot-cortex-a55.S
@@ -19,10 +19,6 @@
 #     const float* zero,                 [sp + 16] -> x12
 #     const xnn_f32_minmax_params params [sp + 24] -> x8
 
-
-#     size_t cn_stride,          [sp] -> x12
-#     const union xnn_qs8_igemm_params params)  [sp + 8] -> x11
-
 # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
 
 # Register usage
@@ -37,25 +33,25 @@
 # C3   x7 v19 v23 v27 v31
 # unused v12 v13 v14 v15
 
+# x14 temp for A55 loads.
+
 BEGIN_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55
 
         # Load cn_stride, a_offset
-        LDP x10, x11, [sp]
+        LDP     x10, x11, [sp]
 
         # Load zero, params pointer
-        LDP x12, x8, [sp, 16]
+        LDP     x12, x8, [sp, 16]
 
         # Save x20-x21 on stack
-        STP x20, x21, [sp, -48]!
-
-        # Save d8-d11 on stack
-        STP  d8,  d9, [sp, 16]
-        STP d10, d11, [sp, 32]
+        STP     x20, x21, [sp, -48]!
 
         # Clamp C pointers
         CMP      x0, 2              // if mr < 2
-        ADD      x16, x6, x7        // c1 = c0 + cm_stride
-        CSEL     x16, x6,  x16, LO  //   c1 = c0
+        STP      d8,  d9, [sp, 16]  // Save d8-d11 on stack
+        ADD     x16, x6, x7         // c1 = c0 + cm_stride
+        STP     d10, d11, [sp, 32]
+        CSEL    x16, x6,  x16, LO   //   c1 = c0
         ADD      x2, x2, 3          // kc = (kc + 3) & ~3
 
         ADD     x17, x16, x7        // c2 = c1 + cm_stride
@@ -84,9 +80,9 @@
         MOV     v29.16b, v28.16b
         MOV     v30.16b, v28.16b
         MOV     v31.16b, v28.16b
-
         MOV     x9, x3  // p = ks
 
+        .p2align 3
 1:
         # Load next 4 A pointers
         LDP     x20, x15, [x4], 16
@@ -578,11 +574,11 @@
         B.HI    0b
 
         # Restore d8-d15 from stack
-        LDP d10, d11, [sp, 32]
-        LDP  d8,  d9, [sp, 16]
+        LDP     d10, d11, [sp, 32]
+        LDP      d8,  d9, [sp, 16]
 
         # Restore x20-x21 from stack
-        LDP x20, x21, [sp], 48
+        LDP     x20, x21, [sp], 48
         RET
 
         # Remainder- 4 to 12 bytes of A
@@ -695,11 +691,11 @@
         ST1     {v4.b}[0], [x6]
 11:
         # Restore d8-d15 from stack
-        LDP d10, d11, [sp, 32]
-        LDP  d8,  d9, [sp, 16]
+        LDP     d10, d11, [sp, 32]
+        LDP      d8,  d9, [sp, 16]
 
         # Restore x20-x21 from stack
-        LDP x20, x21, [sp], 48
+        LDP     x20, x21, [sp], 48
         RET
 
 END_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55
diff --git a/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S b/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S
new file mode 100644
index 0000000..d6a9fbf
--- /dev/null
+++ b/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S
@@ -0,0 +1,375 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64(
+#     size_t mr,                 x0
+#     size_t nc,                 x1
+#     size_t kc,                 x2 / x0
+#     size_t ks,                 x3 / x9
+#     const int8_t**restrict a,  x4
+#     const int8_t* restrict w,  x5
+#     int8_t* restrict c,        x6
+#     size_t cm_stride,          x7
+#     size_t cn_stride,                  [sp] -> x10
+#     size_t a_offset,                   [sp + 8] -> x11
+#     const int8_t* zero,                [sp + 16] -> x12
+#     const union xnn_qs8_gemm_params params  [sp + 24] -> x8
+
+# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
+
+# Register usage
+# A0  x20  v0
+# A1  x15  v1
+# A2  x13  v2
+# A3  x21  v3
+# B    x5  v4  v5  v6  v7
+# C0   x6 v16 v20 v24 v28
+# C1  x16 v17 v21 v25 v29
+# C2  x17 v18 v22 v26 v30
+# C3   x7 v19 v23 v27 v31
+# unused v8 v9 v10 v11 v12 v13 v14 v15
+
+BEGIN_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64
+
+        # Clamp C pointers
+        CMP      x0, 2                // if mr < 2
+        LDP     x10, x11, [sp]        // Load cn_stride, a_offset
+        ADD     x16, x6, x7           // c1 = c0 + cm_stride
+        CSEL    x16, x6,  x16, LO     //   c1 = c0
+        ADD      x2, x2, 3            // kc = (kc + 3) & ~3
+
+        ADD     x17, x16, x7          // c2 = c1 + cm_stride
+        LDP     x12, x8, [sp, 16]     // Load zero, params pointer
+                                      // if mr <= 2
+        CSEL    x17, x16, x17, LS     //   c2 = c1
+        BIC      x2, x2, 3
+
+        CMP      x0, 4                // if mr < 4
+        STP     x20, x21, [sp, -16]!  // Save x20-x21 on stack
+        ADD      x7,  x17, x7         // c3 = c2 + cm_stride
+        CSEL     x7,  x17, x7, LO     //   c3 = c2
+
+        .p2align 3
+0:
+        # Load initial bias from w into accumulators
+        LDP     q16, q20, [x5], 32
+        MOV     v17.16b, v16.16b
+        MOV     v18.16b, v16.16b
+        LDP     q24, q28, [x5], 32
+        MOV     v19.16b, v16.16b
+        MOV     v21.16b, v20.16b
+        MOV     v22.16b, v20.16b
+        MOV     v23.16b, v20.16b
+        MOV     v25.16b, v24.16b
+        MOV     v26.16b, v24.16b
+        MOV     v27.16b, v24.16b
+        MOV     v29.16b, v28.16b
+        MOV     v30.16b, v28.16b
+        MOV     v31.16b, v28.16b
+        MOV     x9, x3  // p = ks
+
+        .p2align 3
+1:
+        # Load next 4 A pointers
+        LDP     x20, x15, [x4], 16
+        LDP     x13, x21, [x4], 16
+
+        CMP     x20, x12           // if a0 == zero
+        ADD     x20, x20, x11      // a0 += a_offset
+        CSEL    x20, x12, x20, EQ  //   a0 = zero, else += a0 + a_offset
+        CMP     x15, x12           // if a1 == zero
+        ADD     x15, x15, x11      // a1 += a_offset
+        CSEL    x15, x12, x15, EQ  //   a1 = zero, else += a1 + a_offset
+        CMP     x13, x12           // if a2 == zero
+        ADD     x13, x13, x11      // a2 += a_offset
+        CSEL    x13, x12, x13, EQ  //   a2 = zero, else += a2 + a_offset
+        CMP     x21, x12           // if a3 == zero
+        ADD     x21, x21, x11      // a3 += a_offset
+        CSEL    x21, x12, x21, EQ  //   a3 = zero, else += a3 + a_offset
+
+        # Is there at least 8 bytes for main loop?
+        SUBS    x0, x2, 8          // k = kc - 8
+        B.LO    4f
+
+        # Main loop - 8 bytes of A
+        .p2align 3
+2:
+        LDR     d0, [x20], 8
+        LDR     q4,  [x5], 16
+        LDR     d1, [x15], 8
+        LDR     d2, [x13], 8
+        LDR     d3, [x21], 8
+        LDR     q5,  [x5], 16
+        SDOT    v16.4s, v4.16b,  v0.4b[0]
+        SDOT    v17.4s, v4.16b,  v1.4b[0]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[0]
+        SDOT    v19.4s, v4.16b,  v3.4b[0]
+        SDOT    v20.4s, v5.16b,  v0.4b[0]
+        SDOT    v21.4s, v5.16b,  v1.4b[0]
+        SDOT    v22.4s, v5.16b,  v2.4b[0]
+        SDOT    v23.4s, v5.16b,  v3.4b[0]
+        SDOT    v24.4s, v6.16b, v0.4b[0]
+        SDOT    v25.4s, v6.16b, v1.4b[0]
+        LDP     q4, q5, [x5], 32
+        SDOT    v26.4s, v6.16b, v2.4b[0]
+        SDOT    v27.4s, v6.16b, v3.4b[0]
+        SDOT    v28.4s, v7.16b, v0.4b[0]
+        SDOT    v29.4s, v7.16b, v1.4b[0]
+        SDOT    v30.4s, v7.16b, v2.4b[0]
+        SDOT    v31.4s, v7.16b, v3.4b[0]
+        SDOT    v16.4s, v4.16b,  v0.4b[1]
+        SDOT    v17.4s, v4.16b,  v1.4b[1]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[1]
+        SDOT    v19.4s, v4.16b,  v3.4b[1]
+        SDOT    v20.4s, v5.16b,  v0.4b[1]
+        SDOT    v21.4s, v5.16b,  v1.4b[1]
+        SDOT    v22.4s, v5.16b,  v2.4b[1]
+        SDOT    v23.4s, v5.16b,  v3.4b[1]
+        SDOT    v24.4s, v6.16b,  v0.4b[1]
+        SDOT    v25.4s, v6.16b,  v1.4b[1]
+        SDOT    v26.4s, v6.16b,  v2.4b[1]
+        SDOT    v27.4s, v6.16b,  v3.4b[1]
+        SDOT    v28.4s, v7.16b,  v0.4b[1]
+        SDOT    v29.4s, v7.16b,  v1.4b[1]
+        SDOT    v30.4s, v7.16b,  v2.4b[1]
+        SUBS    x0, x0, 8
+        SDOT    v31.4s, v7.16b,  v3.4b[1]
+        B.HS    2b
+
+        # Is there a remainder?- 4 bytes of A
+        TBNZ    x0, 2, 4f
+
+3:
+        # ks loop
+        SUBS    x9, x9, 32  // ks -= MR * sizeof(int8_t*)
+        B.HI    1b
+
+        # Apply params - scale, shift, bias and clamp
+        LD2R    {v0.4s, v1.4s}, [x8], 8
+        CMEQ    v2.4s, v1.4s, 0
+
+        BIC     v4.16b, v16.16b, v2.16b
+        BIC     v5.16b, v17.16b, v2.16b
+        BIC     v6.16b, v18.16b, v2.16b
+        BIC     v7.16b, v19.16b, v2.16b
+
+        SQRDMULH  v16.4s, v16.4s, v0.4s
+        SQRDMULH  v17.4s, v17.4s, v0.4s
+        SQRDMULH  v18.4s, v18.4s, v0.4s
+        SQRDMULH  v19.4s, v19.4s, v0.4s
+
+        SSRA    v16.4s, v4.4s, 31  // signed shift right accumulate
+        SSRA    v17.4s, v5.4s, 31
+        SSRA    v18.4s, v6.4s, 31
+        SSRA    v19.4s, v7.4s, 31
+
+        BIC     v4.16b, v20.16b, v2.16b
+        BIC     v5.16b, v21.16b, v2.16b
+        BIC     v6.16b, v22.16b, v2.16b
+        BIC     v7.16b, v23.16b, v2.16b
+
+        SQRDMULH  v20.4s, v20.4s, v0.4s
+        SQRDMULH  v21.4s, v21.4s, v0.4s
+        SQRDMULH  v22.4s, v22.4s, v0.4s
+        SQRDMULH  v23.4s, v23.4s, v0.4s
+
+        SSRA    v20.4s, v4.4s, 31
+        SSRA    v21.4s, v5.4s, 31
+        SSRA    v22.4s, v6.4s, 31
+        SSRA    v23.4s, v7.4s, 31
+
+        BIC     v4.16b, v24.16b, v2.16b
+        BIC     v5.16b, v25.16b, v2.16b
+        BIC     v6.16b, v26.16b, v2.16b
+        BIC     v7.16b, v27.16b, v2.16b
+
+        SQRDMULH  v24.4s, v24.4s, v0.4s
+        SQRDMULH  v25.4s, v25.4s, v0.4s
+        SQRDMULH  v26.4s, v26.4s, v0.4s
+        SQRDMULH  v27.4s, v27.4s, v0.4s
+
+        SSRA    v24.4s, v4.4s, 31
+        SSRA    v25.4s, v5.4s, 31
+        SSRA    v26.4s, v6.4s, 31
+        SSRA    v27.4s, v7.4s, 31
+
+        BIC     v4.16b, v28.16b, v2.16b
+        BIC     v5.16b, v29.16b, v2.16b
+        BIC     v6.16b, v30.16b, v2.16b
+        BIC     v7.16b, v31.16b, v2.16b
+
+        SQRDMULH  v28.4s, v28.4s, v0.4s
+        SQRDMULH  v29.4s, v29.4s, v0.4s
+        SQRDMULH  v30.4s, v30.4s, v0.4s
+        SQRDMULH  v31.4s, v31.4s, v0.4s
+
+        SSRA    v28.4s, v4.4s, 31
+        SSRA    v29.4s, v5.4s, 31
+        SSRA    v30.4s, v6.4s, 31
+        SSRA    v31.4s, v7.4s, 31
+
+        SRSHL   v16.4s, v16.4s, v1.4s  // signed rounding shift left
+        SRSHL   v17.4s, v17.4s, v1.4s
+        SRSHL   v18.4s, v18.4s, v1.4s
+        SRSHL   v19.4s, v19.4s, v1.4s
+        SRSHL   v20.4s, v20.4s, v1.4s
+        SRSHL   v21.4s, v21.4s, v1.4s
+        SRSHL   v22.4s, v22.4s, v1.4s
+        SRSHL   v23.4s, v23.4s, v1.4s
+        SRSHL   v24.4s, v24.4s, v1.4s
+        SRSHL   v25.4s, v25.4s, v1.4s
+        SRSHL   v26.4s, v26.4s, v1.4s
+        SRSHL   v27.4s, v27.4s, v1.4s
+        SRSHL   v28.4s, v28.4s, v1.4s
+        SRSHL   v29.4s, v29.4s, v1.4s
+        SRSHL   v30.4s, v30.4s, v1.4s
+        SRSHL   v31.4s, v31.4s, v1.4s
+
+        SQXTN   v16.4h, v16.4s
+        SQXTN   v17.4h, v17.4s
+        SQXTN   v18.4h, v18.4s
+        SQXTN   v19.4h, v19.4s
+        SQXTN   v24.4h, v24.4s
+        SQXTN   v25.4h, v25.4s
+        SQXTN   v26.4h, v26.4s
+        SQXTN   v27.4h, v27.4s
+        LD1R    {v2.8h}, [x8], 2   // add bias
+
+        SQXTN2  v16.8h, v20.4s
+        SQXTN2  v17.8h, v21.4s
+        SQXTN2  v18.8h, v22.4s
+        SQXTN2  v19.8h, v23.4s
+        SQXTN2  v24.8h, v28.4s
+        SQXTN2  v25.8h, v29.4s
+        SQXTN2  v26.8h, v30.4s
+        SQXTN2  v27.8h, v31.4s
+
+        SQADD   v16.8h, v16.8h, v2.8h
+        SQADD   v17.8h, v17.8h, v2.8h
+        SQADD   v18.8h, v18.8h, v2.8h
+        SQADD   v19.8h, v19.8h, v2.8h
+        SQADD   v24.8h, v24.8h, v2.8h
+        SQADD   v25.8h, v25.8h, v2.8h
+        SQADD   v26.8h, v26.8h, v2.8h
+        SQADD   v27.8h, v27.8h, v2.8h
+        LD1R    {v0.16b}, [x8], 1  // clamp min value
+
+        SQXTN    v4.8b, v16.8h
+        SQXTN    v5.8b, v17.8h
+        SQXTN    v6.8b, v18.8h
+        SQXTN    v7.8b, v19.8h
+        LD1R    {v1.16b}, [x8]     // clamp max value
+        SQXTN2   v4.16b, v24.8h
+        SQXTN2   v5.16b, v25.8h
+        SQXTN2   v6.16b, v26.8h
+        SQXTN2   v7.16b, v27.8h
+        SUB      x8, x8, 11       // rewind params pointer
+
+        SMAX    v4.16b, v4.16b, v0.16b
+        SMAX    v5.16b, v5.16b, v0.16b
+        SMAX    v6.16b, v6.16b, v0.16b
+        SMAX    v7.16b, v7.16b, v0.16b
+        SUBS    x1, x1, 16
+        SMIN    v4.16b, v4.16b, v1.16b
+        SMIN    v5.16b, v5.16b, v1.16b
+        SMIN    v6.16b, v6.16b, v1.16b
+        SMIN    v7.16b, v7.16b, v1.16b
+        B.LO    5f
+
+        # Store full 4 x 16
+        ST1     {v7.16b},  [x7], x10
+        ST1     {v6.16b}, [x17], x10
+        ST1     {v5.16b}, [x16], x10
+        ST1     {v4.16b},  [x6], x10
+
+        SUB     x4, x4, x3  // a -= ks
+
+        # nc loop
+        B.HI    0b
+
+        # Restore x20-x21 from stack
+        LDP     x20, x21, [sp], 16
+        RET
+
+        # Remainder- 4 bytes of A
+        .p2align 3
+4:
+        LDR     s0, [x20], 4
+        LDR     q4,  [x5], 16
+        LDR     s1, [x15], 4
+        LDR     s2, [x13], 4
+        LDR     s3, [x21], 4
+        LDR     q5,  [x5], 16
+        SDOT    v16.4s, v4.16b,  v0.4b[0]
+        SDOT    v17.4s, v4.16b,  v1.4b[0]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[0]
+        SDOT    v19.4s, v4.16b,  v3.4b[0]
+        SDOT    v20.4s, v5.16b,  v0.4b[0]
+        SDOT    v21.4s, v5.16b,  v1.4b[0]
+        SDOT    v22.4s, v5.16b,  v2.4b[0]
+        SDOT    v23.4s, v5.16b,  v3.4b[0]
+        SDOT    v24.4s, v6.16b, v0.4b[0]
+        SDOT    v25.4s, v6.16b, v1.4b[0]
+        SDOT    v26.4s, v6.16b, v2.4b[0]
+        SDOT    v27.4s, v6.16b, v3.4b[0]
+        SDOT    v28.4s, v7.16b, v0.4b[0]
+        SDOT    v29.4s, v7.16b, v1.4b[0]
+        SDOT    v30.4s, v7.16b, v2.4b[0]
+        SDOT    v31.4s, v7.16b, v3.4b[0]
+        B       3b
+
+        # Store odd width
+        .p2align 3
+5:
+        TBZ     x1, 3, 6f
+        STR     d7, [x7], 8
+        DUP     d7, v7.d[1]
+        STR     d6, [x17], 8
+        DUP     d6, v6.d[1]
+        STR     d5, [x16], 8
+        DUP     d5, v5.d[1]
+        STR     d4, [x6], 8
+        DUP     d4, v4.d[1]
+6:
+        TBZ     x1, 2, 7f
+        STR     s7, [x7], 4
+        DUP     s7, v7.s[1]
+        STR     s6, [x17], 4
+        DUP     s6, v6.s[1]
+        STR     s5, [x16], 4
+        DUP     s5, v5.s[1]
+        STR     s4, [x6], 4
+        DUP     s4, v4.s[1]
+7:
+        TBZ     x1, 1, 8f
+        ST1     {v7.h}[0], [x7], 2
+        DUP      h7, v7.h[1]
+        ST1     {v6.h}[0], [x17], 2
+        DUP      h6, v6.h[1]
+        ST1     {v5.h}[0], [x16], 2
+        DUP      h5, v5.h[1]
+        ST1     {v4.h}[0], [x6], 2
+        DUP      h4, v4.h[1]
+8:
+        TBZ     x1, 0, 9f
+        ST1     {v7.b}[0], [x7]
+        ST1     {v6.b}[0], [x17]
+        ST1     {v5.b}[0], [x16]
+        ST1     {v4.b}[0], [x6]
+9:
+        # Restore x20-x21 from stack
+        LDP x20, x21, [sp], 16
+        RET
+
+END_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 49c91d8..9130fc0 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -407,6 +407,7 @@
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_6x16c4__neondot)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_8x16c4__neondot)
 
+DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64)
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55)
 
 DECLARE_QS8_IGEMM_MINMAX_UKERNEL_FUNCTION(xnn_qs8_igemm_minmax_ukernel_1x4c2__sse2_ld64)
diff --git a/test/qs8-igemm-minmax.cc b/test/qs8-igemm-minmax.cc
index 0828030..8bf1008 100644
--- a/test/qs8-igemm-minmax.cc
+++ b/test/qs8-igemm-minmax.cc
@@ -29975,6 +29975,474 @@
 
 
 #if XNN_ARCH_ARM64
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_eq_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cn_stride(19)
+      .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_eq_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(8)
+          .iterations(1)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_eq_8_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t m = 1; m <= 4; m++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_eq_8_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(n)
+        .k(8)
+        .iterations(1)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_lt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_lt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k < 8; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_gt_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_gt_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 9; k < 16; k++) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_div_8) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, k_div_8_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 16; k <= 80; k += 8) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        for (uint32_t m = 1; m <= 4; m++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, small_kernel_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .ks(3)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_gt_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, n_div_16_small_kernel) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(n)
+          .k(k)
+          .ks(3)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      for (uint32_t m = 1; m <= 4; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(4)
+            .nr(16)
+            .kr(4)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+        }
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, a_offset) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (size_t k = 1; k <= 40; k += 9) {
+      GemmMicrokernelTester()
+        .mr(4)
+        .nr(16)
+        .kr(4)
+        .sr(1)
+        .m(4)
+        .n(16)
+        .k(k)
+        .ks(3)
+        .a_offset(163)
+        .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, zero) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    for (uint32_t mz = 0; mz < 4; mz++) {
+      for (size_t k = 1; k <= 40; k += 9) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(16)
+          .kr(4)
+          .sr(1)
+          .m(4)
+          .n(16)
+          .k(k)
+          .ks(3)
+          .a_offset(163)
+          .zero_index(mz)
+          .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+      }
+    }
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, qmin) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmin(128)
+      .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, qmax) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .qmax(128)
+      .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+  }
+
+  TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_LD64, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_DOT;
+    GemmMicrokernelTester()
+      .mr(4)
+      .nr(16)
+      .kr(4)
+      .sr(1)
+      .m(4)
+      .n(16)
+      .k(8)
+      .cm_stride(19)
+      .Test(xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64);
+  }
+#endif  // XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM64
   TEST(QS8_IGEMM_MINMAX_4X16C4__AARCH64_NEONDOT_CORTEX_A55, k_eq_16) {
     TEST_REQUIRES_ARM_NEON_DOT;
     GemmMicrokernelTester()
diff --git a/test/qs8-igemm-minmax.yaml b/test/qs8-igemm-minmax.yaml
index d1609bf..cb7fda3 100644
--- a/test/qs8-igemm-minmax.yaml
+++ b/test/qs8-igemm-minmax.yaml
@@ -130,6 +130,8 @@
   k-block: 8
 - name: xnn_qs8_igemm_minmax_ukernel_8x16c4__neondot
   k-block: 8
+- name: xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64
+  k-block: 8
 - name: xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55
   k-block: 16
 - name: xnn_qs8_igemm_minmax_ukernel_1x4c2__sse2_ld64