Benchmark the xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal and xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal GEMM microkernels

These use similar math to the C16 variant, but the load order is different.

PiperOrigin-RevId: 362245222
diff --git a/bench/qs8-gemm-e2e.cc b/bench/qs8-gemm-e2e.cc
index 98b1eea..367381f 100644
--- a/bench/qs8-gemm-e2e.cc
+++ b/bench/qs8-gemm-e2e.cc
@@ -124,6 +124,24 @@
       benchmark::utils::CheckNEONDOT);
   }
 
+  static void qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal,
+      xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal,
+      xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal,
+      2 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
+    GEMMEnd2EndBenchmark(state, model,
+      xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal,
+      xnn_qs8_igemm_minmax_ukernel_2x8c8__neon_mull_padal,
+      xnn_qs8_gemm_minmax_ukernel_1x8c8__neon_mlal_padal,
+      xnn_qs8_igemm_minmax_ukernel_1x8c8__neon_mlal_padal,
+      2 /* mr */, 8  /* nr */, 3 /* log2_kr */, 0 /* log2_sr */,
+      benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal(benchmark::State& state, models::ExecutionPlanFactory model) {
     GEMMEnd2EndBenchmark(state, model,
       xnn_qs8_gemm_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal,
@@ -141,6 +159,8 @@
   BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_cortex_a55)
   BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld32)
   BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64)
+  BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal)
+  BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal)
   BENCHMARK_QS8_END2END(qs8_gemm_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal)
 #endif  // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
 
diff --git a/bench/qs8-gemm.cc b/bench/qs8-gemm.cc
index 639d890..8a0d8f1 100644
--- a/bench/qs8-gemm.cc
+++ b/bench/qs8-gemm.cc
@@ -559,6 +559,12 @@
   static void qs8_gemm_4x16c4__aarch64_neondot_ld64(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64, 4, 16, 4, 1, benchmark::utils::CheckNEONDOT);
   }
+  static void qs8_gemm_2x8c8__aarch64_neon_mull_padal(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mull_padal, 2, 8, 8, 1, benchmark::utils::CheckNEON);
+  }
+  static void qs8_gemm_2x8c8__aarch64_neon_mlal_padal(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x8c8__aarch64_neon_mlal_padal, 2, 8, 8, 1, benchmark::utils::CheckNEON);
+  }
   static void qs8_gemm_2x8c16__aarch64_neon_mlal_padal(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_qs8_gemm_minmax_ukernel_2x8c16__aarch64_neon_mlal_padal, 2, 8, 16, 1, benchmark::utils::CheckNEON);
   }
@@ -568,6 +574,8 @@
   BENCHMARK_GEMM(qs8_gemm_4x16c4__aarch64_neondot_ld32)
   BENCHMARK_GEMM(qs8_gemm_4x16c4__aarch64_neondot_ld64)
   BENCHMARK_GEMM(qs8_gemm_4x16c4__aarch64_neondot_cortex_a55)
+  BENCHMARK_GEMM(qs8_gemm_2x8c8__aarch64_neon_mull_padal)
+  BENCHMARK_GEMM(qs8_gemm_2x8c8__aarch64_neon_mlal_padal)
   BENCHMARK_GEMM(qs8_gemm_2x8c16__aarch64_neon_mlal_padal)
 #endif  // XNN_ARCH_ARM64