Rename the unroll suffix to x for SpMM microkernels with unrolled loops

PiperOrigin-RevId: 339990626
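
For reference, the new suffix encodes the unroll factor of the loop over
non-zero weights: _x2 (formerly _unroll2) processes two non-zeros per main
loop iteration. The scalar sketch below only illustrates that convention;
it is not the NEON microkernel, and the function name, parameters, and the
use of element (rather than byte) offsets are made up for this example.

    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical scalar illustration of an "x2" SpMM inner loop:
    // accumulate one output element from a row with nnz non-zero weights,
    // consuming two non-zeros per main-loop iteration.
    static float spmm_row_sketch_x2(
        size_t nnz,            // number of non-zero weights in this row
        const float* w,        // non-zero weight values
        const int32_t* dmap,   // input offset (in elements) of each non-zero
        const float* input)
    {
      float vacc0 = 0.0f;
      float vacc1 = 0.0f;
      size_t n = nnz;
      for (; n >= 2; n -= 2) {  // unrolled by 2 -> the "_x2" suffix
        vacc0 += input[dmap[0]] * w[0];
        vacc1 += input[dmap[1]] * w[1];
        dmap += 2;
        w += 2;
      }
      if (n != 0) {             // remainder: at most one leftover non-zero
        vacc0 += input[dmap[0]] * w[0];
      }
      return vacc0 + vacc1;
    }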
diff --git a/bench/f16-spmm.cc b/bench/f16-spmm.cc
index 248fd33..5f28191 100644
--- a/bench/f16-spmm.cc
+++ b/bench/f16-spmm.cc
@@ -168,36 +168,36 @@
   static void spmm80_8x1__neonfp16arith(benchmark::State& state, const char* net) {
     SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith, 8, 1, 0.8f);
   }
-  static void spmm80_8x1__neonfp16arith_unroll2(benchmark::State& state, const char* net) {
-    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_unroll2, 8, 1, 0.8f);
+  static void spmm80_8x1__neonfp16arith_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_8x1__neonfp16arith_x2, 8, 1, 0.8f);
   }
   static void spmm80_16x1__neonfp16arith(benchmark::State& state, const char* net) {
     SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith, 16, 1, 0.8f);
   }
-  static void spmm80_16x1__neonfp16arith_unroll2(benchmark::State& state, const char* net) {
-    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_unroll2, 16, 1, 0.8f);
+  static void spmm80_16x1__neonfp16arith_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_16x1__neonfp16arith_x2, 16, 1, 0.8f);
   }
   static void spmm80_24x1__neonfp16arith(benchmark::State& state, const char* net) {
     SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith, 24, 1, 0.8f);
   }
-  static void spmm80_24x1__neonfp16arith_unroll2(benchmark::State& state, const char* net) {
-    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_unroll2, 24, 1, 0.8f);
+  static void spmm80_24x1__neonfp16arith_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_24x1__neonfp16arith_x2, 24, 1, 0.8f);
   }
   static void spmm80_32x1__neonfp16arith(benchmark::State& state, const char* net) {
     SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith, 32, 1, 0.8f);
   }
-  static void spmm80_32x1__neonfp16arith_unroll2(benchmark::State& state, const char* net) {
-    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_unroll2, 32, 1, 0.8f);
+  static void spmm80_32x1__neonfp16arith_x2(benchmark::State& state, const char* net) {
+    SpMMBenchmark(state, xnn_f16_spmm_minmax_ukernel_32x1__neonfp16arith_x2, 32, 1, 0.8f);
   }
 
   BENCHMARK_GEMM(spmm80_8x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_8x1__neonfp16arith_unroll2)
+  BENCHMARK_GEMM(spmm80_8x1__neonfp16arith_x2)
   BENCHMARK_GEMM(spmm80_16x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_16x1__neonfp16arith_unroll2)
+  BENCHMARK_GEMM(spmm80_16x1__neonfp16arith_x2)
   BENCHMARK_GEMM(spmm80_24x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_24x1__neonfp16arith_unroll2)
+  BENCHMARK_GEMM(spmm80_24x1__neonfp16arith_x2)
   BENCHMARK_GEMM(spmm80_32x1__neonfp16arith)
-  BENCHMARK_GEMM(spmm80_32x1__neonfp16arith_unroll2)
+  BENCHMARK_GEMM(spmm80_32x1__neonfp16arith_x2)
 #endif  // XNN_ARCH_ARM64
 
 #ifndef XNNPACK_BENCHMARK_NO_MAIN