FP16 6x16 GEMM micro-kernel for Cortex-A55 r1.
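
This adds a software-pipelined 6x16 F16 GEMM (and GEMMINC) micro-kernel tuned for the
in-order Cortex-A55, together with the Bazel/CMake build entries, generator-script
rules, benchmark, unit tests, and test spec that wire it up.

For reference, the C-visible entry point as documented by the parameter comments in
the assembly below; the canonical declaration is emitted by
DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION in src/xnnpack/gemm.h, so treat this as a
sketch only (kc and all strides are in bytes):

  void xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55(
      size_t mr,                       // rows of A/C to process, 1..6
      size_t nc,                       // output columns
      size_t kc,                       // reduction depth, in bytes
      const uint8_t* restrict a,       // input, mr rows spaced a_stride bytes apart
      size_t a_stride,
      const void* restrict w,          // packed bias + weights
      uint8_t* restrict c,             // output, rows spaced cm_stride bytes apart
      size_t cm_stride,
      size_t cn_stride,                // bytes between 16-column output panels
      const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]);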

PiperOrigin-RevId: 377952234
diff --git a/BUILD.bazel b/BUILD.bazel
index 209c8c2..13def0a 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -4047,6 +4047,7 @@
     "src/f16-gemm/gen-inc/6x8inc-minmax-aarch64-neonfp16arith-ld64.S",
     "src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-ld32.S",
     "src/f16-gemm/gen-inc/8x8inc-minmax-aarch64-neonfp16arith-ld64.S",
+    "src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S",
     "src/f16-gemm/gen/1x8-minmax-aarch64-neonfp16arith-ld64.S",
     "src/f16-gemm/gen/1x16-minmax-aarch64-neonfp16arith-ld32.S",
     "src/f16-gemm/gen/4x8-minmax-aarch64-neonfp16arith-ld64.S",
@@ -4054,6 +4055,7 @@
     "src/f16-gemm/gen/6x8-minmax-aarch64-neonfp16arith-ld64.S",
     "src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-ld32.S",
     "src/f16-gemm/gen/8x8-minmax-aarch64-neonfp16arith-ld64.S",
+    "src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S",
     "src/f32-dwconv/up4x9-minmax-aarch64-neonfma-cortex-a55.S",
     "src/f32-dwconv/up4x9-minmax-aarch64-neonfma.S",
     "src/f32-gemm/gen-inc/1x8inc-minmax-aarch64-neonfma-cortex-a53.S",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1c71c90..f429399 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3271,6 +3271,7 @@
   src/f16-gemm/gen-inc/6x8inc-minmax-aarch64-neonfp16arith-ld64.S
   src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-ld32.S
   src/f16-gemm/gen-inc/8x8inc-minmax-aarch64-neonfp16arith-ld64.S
+  src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S
   src/f16-gemm/gen/1x8-minmax-aarch64-neonfp16arith-ld64.S
   src/f16-gemm/gen/1x16-minmax-aarch64-neonfp16arith-ld32.S
   src/f16-gemm/gen/4x8-minmax-aarch64-neonfp16arith-ld64.S
@@ -3278,6 +3279,7 @@
   src/f16-gemm/gen/6x8-minmax-aarch64-neonfp16arith-ld64.S
   src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-ld32.S
   src/f16-gemm/gen/8x8-minmax-aarch64-neonfp16arith-ld64.S
+  src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S
   src/f32-dwconv/up4x9-minmax-aarch64-neonfma-cortex-a55.S
   src/f32-dwconv/up4x9-minmax-aarch64-neonfma.S
   src/f32-gemm/gen-inc/1x8inc-minmax-aarch64-neonfma-cortex-a53.S
diff --git a/bench/f16-gemm.cc b/bench/f16-gemm.cc
index 2c7f301..441de27 100644
--- a/bench/f16-gemm.cc
+++ b/bench/f16-gemm.cc
@@ -165,6 +165,10 @@
     GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32, 6, 16, 1, 1);
   }
 
+  static void f16_gemm_6x16__aarch64_neonfp16arith_cortex_a55(benchmark::State& state, const char* net) {
+    GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, 6, 16, 1, 1);
+  }
+
   static void f16_gemm_1x8__aarch64_neonfp16arith_ld64(benchmark::State& state, const char* net) {
     GEMMBenchmark(state, xnn_f16_gemm_minmax_ukernel_1x8__aarch64_neonfp16arith_ld64, 1, 8, 1, 1);
   }
@@ -184,6 +188,7 @@
   BENCHMARK_GEMM(f16_gemm_1x16__aarch64_neonfp16arith_ld32)
   BENCHMARK_GEMM(f16_gemm_4x16__aarch64_neonfp16arith_ld32)
   BENCHMARK_GEMM(f16_gemm_6x16__aarch64_neonfp16arith_ld32)
+  BENCHMARK_GEMM(f16_gemm_6x16__aarch64_neonfp16arith_cortex_a55)
   BENCHMARK_GEMM(f16_gemm_1x8__aarch64_neonfp16arith_ld64)
   BENCHMARK_GEMM(f16_gemm_4x8__aarch64_neonfp16arith_ld64)
   BENCHMARK_GEMM(f16_gemm_6x8__aarch64_neonfp16arith_ld64)
diff --git a/scripts/generate-f16-gemm.sh b/scripts/generate-f16-gemm.sh
index 443af5a..2ff4751 100755
--- a/scripts/generate-f16-gemm.sh
+++ b/scripts/generate-f16-gemm.sh
@@ -21,6 +21,10 @@
 tools/xngen src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in -D INC=1 -o src/f16-gemm/gen-inc/6x8inc-minmax-aarch64-neonfp16arith-ld64.S
 tools/xngen src/f16-gemm/8x8-aarch64-neonfp16arith-ld64.S.in -D INC=1 -o src/f16-gemm/gen-inc/8x8inc-minmax-aarch64-neonfp16arith-ld64.S
 
+### Cortex A55 micro-kernels
+tools/xngen src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in -D INC=0 -o src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S
+tools/xngen src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in -D INC=1 -o src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S
+
 ########################## ARM NEON with FP16 compute #########################
 ### LD64 micro-kernels
 tools/xngen src/f16-gemm/neonfp16arith-ld64.c.in -D MR=1 -D NR=8 -D INC=0 -o src/f16-gemm/gen/1x8-minmax-neonfp16arith-ld64.c
diff --git a/src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in b/src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in
new file mode 100644
index 0000000..ab0eff4
--- /dev/null
+++ b/src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in
@@ -0,0 +1,432 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55(
+#     size_t mr,                x0
+#     size_t nc,                x1
+#     size_t kc,                x2 / x0
+#     const uint8_t*restrict a, x3
+#     size_t a_stride,          x4
+#     const void*restrict w,    x5
+#     uint8_t*restrict c,       x6
+#     size_t cm_stride,         x7
+#     size_t cn_stride,         [sp] -> (x0)
+$if INC:
+  #     const float*restrict acc,  [sp + 8] -> x15
+  #     const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> x8
+$else:
+  #     const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> x8
+
+# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
+
+# A pointers
+#  x3 a0
+#  x9 a1
+# x10 a2
+# x11 a3
+# x12 a4
+#  x4 a5
+
+# C pointers
+#  x6 c0
+# x16 c1
+# x17 c2
+# x14 c3
+# x13 c4
+#  x7 c5
+
+# Vector register usage
+# A0   v0
+# A1   v1
+# A2   v2
+# A3   v3
+# A4   v4
+# A5   v5
+# B   v16 v17 v18 v19
+# C   v20 v21
+# C   v22 v23
+# C   v24 v25
+# C   v26 v27
+# C   v28 v29
+# C   v30 v31
+# Clamp v6, (v4), (v5)
+# unused A   v8 v9 v10 v11
+# unused B   v12 v13 v14 v15
+
+
+BEGIN_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+        $if INC:
+          # Load acc, params pointer
+          LDP     x15, x8, [sp, 8]
+        $else:
+          # Load params pointer
+          LDR     x8, [sp, 8]
+
+        # Clamp A and C pointers
+        CMP     x0, 2                   // if mr < 2
+        ADD     x9, x3, x4              // a1 = a0 + a_stride
+        ADD     x16, x6, x7             // c1 = c0 + cm_stride
+        CSEL    x9, x3, x9, LO          //   a1 = a0
+        CSEL    x16, x6, x16, LO        //   c1 = c0
+
+        ADD     x10, x9, x4             // a2 = a1 + a_stride
+        ADD     x17, x16, x7            // c2 = c1 + cm_stride
+                                        // if mr <= 2
+        CSEL    x10, x9, x10, LS        //   a2 = a1
+        CSEL    x17, x16, x17, LS       //   c2 = c1
+
+        CMP     x0, 4                   // if mr < 4
+        ADD     x11, x10, x4            // a3 = a2 + a_stride
+        ADD     x14, x17, x7            // c3 = c2 + cm_stride
+        CSEL    x11, x10, x11, LO       //   a3 = a2
+        CSEL    x14, x17, x14, LO       //   c3 = c2
+
+        ADD     x12, x11, x4            // a4 = a3 + a_stride
+        ADD     x13, x14, x7            // c4 = c3 + cm_stride
+                                        // if mr <= 4
+        CSEL    x12, x11, x12, LS       //   a4 = a3
+        CSEL    x13, x14, x13, LS       //   c4 = c3
+
+        CMP     x0, 6                   // if mr < 6
+        ADD     x4, x12, x4             // a5 = a4 + a_stride
+        ADD     x7, x13, x7             // c5 = c4 + cm_stride
+        CSEL    x4, x12, x4, LO         //   a5 = a4
+        CSEL    x7, x13, x7, LO         //   c5 = c4
+
+        # Load params scale value
+        LD1R    {v6.8h}, [x8]
+        ADD     x8, x8, 2
+
+0:
+        $if INC:
+          # Load initial accumulators
+          LDP     q20, q21, [x15], 32
+          LDP     q22, q23, [x15], 32
+          LDP     q24, q25, [x15], 32
+          LDP     q26, q27, [x15], 32
+          LDP     q28, q29, [x15], 32
+          LDP     q30, q31, [x15], 32
+        $else:
+          # Load initial bias from w into accumulators
+          LDP     q20, q21, [x5], 32
+          MOV     v22.16b, v20.16b
+          MOV     v23.16b, v21.16b
+          MOV     v24.16b, v20.16b
+          MOV     v25.16b, v21.16b
+          MOV     v26.16b, v20.16b
+          MOV     v27.16b, v21.16b
+          MOV     v28.16b, v20.16b
+          MOV     v29.16b, v21.16b
+          MOV     v30.16b, v20.16b
+          MOV     v31.16b, v21.16b
+
+        # Are there at least 2 halffloats (4 bytes)?
+        SUBS    x0, x2, 4               // k = kc - 4
+        B.LO    4f
+
+        # Prologue - load 6 A and 2 B
+
+        LDR     s0,  [x3], 4
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     s1,  [x9], 4
+        LDR     s2, [x10], 4
+        LDR     s3, [x11], 4
+        LDR     s4, [x12], 4
+        LDR     s5,  [x4], 4
+
+        # Are there at least 2 halffloats for the main loop?
+        SUBS    x0, x0, 4
+        B.LO    2f
+
+        # Main loop - 2 halffloats of A (4 bytes)
+        # 24 FMA + 6 ld32 A + 4 LDR B
+1:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        LDR     q18, [x5], 16
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        LDR     q19, [x5], 16
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        SUBS    x0, x0, 4
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        LDR     q16, [x5], 16
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        LDR     q17, [x5], 16
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        LDR     s0,  [x3], 4
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        LDR     s1,  [x9], 4
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        LDR     s2, [x10], 4
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        LDR     s3, [x11], 4
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        LDR     s4, [x12], 4
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+        LDR     s5,  [x4], 4
+        B.HS    1b
+
+        # Epilogue - same as main loop but no loads for next loop
+2:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        LDR     q18, [x5], 16
+        LDR     q19, [x5], 16
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+
+        # Is there a remainder? - 1 halffloat of A (2 bytes)
+        TBNZ    x0, 1, 4f
+3:
+        # Scale and Clamp
+        FMUL    v20.8h, v20.8h, v6.8h
+        # Load params values
+        LD2R    {v4.8h, v5.8h}, [x8]
+        FMUL    v21.8h, v21.8h, v6.8h
+        FMUL    v22.8h, v22.8h, v6.8h
+        FMUL    v23.8h, v23.8h, v6.8h
+        FMUL    v24.8h, v24.8h, v6.8h
+        FMUL    v25.8h, v25.8h, v6.8h
+        FMUL    v26.8h, v26.8h, v6.8h
+        FMUL    v27.8h, v27.8h, v6.8h
+        FMUL    v28.8h, v28.8h, v6.8h
+        FMUL    v29.8h, v29.8h, v6.8h
+        FMUL    v30.8h, v30.8h, v6.8h
+        FMUL    v31.8h, v31.8h, v6.8h
+        # Load cn_stride
+        LDR     x0, [sp, 0]
+        FMAX    v20.8h, v20.8h, v4.8h
+        FMAX    v21.8h, v21.8h, v4.8h
+        FMAX    v22.8h, v22.8h, v4.8h
+        FMAX    v23.8h, v23.8h, v4.8h
+        FMAX    v24.8h, v24.8h, v4.8h
+        FMAX    v25.8h, v25.8h, v4.8h
+        FMAX    v26.8h, v26.8h, v4.8h
+        FMAX    v27.8h, v27.8h, v4.8h
+        FMAX    v28.8h, v28.8h, v4.8h
+        FMAX    v29.8h, v29.8h, v4.8h
+        FMAX    v30.8h, v30.8h, v4.8h
+        FMAX    v31.8h, v31.8h, v4.8h
+        SUBS    x1, x1, 16
+        FMIN    v20.8h, v20.8h, v5.8h
+        FMIN    v21.8h, v21.8h, v5.8h
+        FMIN    v22.8h, v22.8h, v5.8h
+        FMIN    v23.8h, v23.8h, v5.8h
+        FMIN    v24.8h, v24.8h, v5.8h
+        FMIN    v25.8h, v25.8h, v5.8h
+        FMIN    v26.8h, v26.8h, v5.8h
+        FMIN    v27.8h, v27.8h, v5.8h
+        FMIN    v28.8h, v28.8h, v5.8h
+        FMIN    v29.8h, v29.8h, v5.8h
+        FMIN    v30.8h, v30.8h, v5.8h
+        FMIN    v31.8h, v31.8h, v5.8h
+
+        # Store full 6 x 16
+        B.LO    5f
+
+        $if INC:
+          ST1     {v30.16b, v31.16b},  [x7], x0
+          SUB     x3,  x3, x2             // a0 -= kc
+          ST1     {v28.16b, v29.16b}, [x13], x0
+          SUB     x9,  x9, x2             // a1 -= kc
+          ST1     {v26.16b, v27.16b}, [x14], x0
+          SUB     x10, x10, x2            // a2 -= kc
+          ST1     {v24.16b, v25.16b}, [x17], x0
+          SUB     x11, x11, x2            // a3 -= kc
+          ST1     {v22.16b, v23.16b}, [x16], x0
+          SUB     x12, x12, x2            // a4 -= kc
+          ST1     {v20.16b, v21.16b},  [x6], x0
+          SUB     x4,  x4, x2             // a5 -= kc
+        $else:
+          ST1     {v20.16b, v21.16b},  [x6], x0
+          SUB     x3,  x3, x2             // a0 -= kc
+          ST1     {v22.16b, v23.16b}, [x16], x0
+          SUB     x9,  x9, x2             // a1 -= kc
+          ST1     {v24.16b, v25.16b}, [x17], x0
+          SUB     x10, x10, x2            // a2 -= kc
+          ST1     {v26.16b, v27.16b}, [x14], x0
+          SUB     x11, x11, x2            // a3 -= kc
+          ST1     {v28.16b, v29.16b}, [x13], x0
+          SUB     x12, x12, x2            // a4 -= kc
+          ST1     {v30.16b, v31.16b},  [x7], x0
+          SUB     x4,  x4, x2             // a5 -= kc
+
+        B.HI    0b
+        RET
+
+4:
+        # Remainder - 1 halffloat of A (2 bytes)
+        LDR     h0,  [x3], 2
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     h1,  [x9], 2
+        LDR     h2, [x10], 2
+        LDR     h3, [x11], 2
+        LDR     h4, [x12], 2
+        LDR     h5,  [x4], 2
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+        B       3b
+
+        # Store odd width
+5:
+        TBZ     x1, 3, 6f
+        $if INC:
+          STR     q30,  [x7], 16
+          MOV     v30.16b, v31.16b
+          STR     q28, [x13], 16
+          MOV     v28.16b, v29.16b
+          STR     q26, [x14], 16
+          MOV     v26.16b, v27.16b
+          STR     q24, [x17], 16
+          MOV     v24.16b, v25.16b
+          STR     q22, [x16], 16
+          MOV     v22.16b, v23.16b
+          STR     q20,  [x6], 16
+          MOV     v20.16b, v21.16b
+        $else:
+          STR     q20,  [x6], 16
+          MOV     v20.16b, v21.16b
+          STR     q22, [x16], 16
+          MOV     v22.16b, v23.16b
+          STR     q24, [x17], 16
+          MOV     v24.16b, v25.16b
+          STR     q26, [x14], 16
+          MOV     v26.16b, v27.16b
+          STR     q28, [x13], 16
+          MOV     v28.16b, v29.16b
+          STR     q30,  [x7], 16
+          MOV     v30.16b, v31.16b
+
+6:
+        TBZ     x1, 2, 7f
+        $if INC:
+          STR     d30,  [x7], 8
+          DUP     d30, v30.d[1]
+          STR     d28, [x13], 8
+          DUP     d28, v28.d[1]
+          STR     d26, [x14], 8
+          DUP     d26, v26.d[1]
+          STR     d24, [x17], 8
+          DUP     d24, v24.d[1]
+          STR     d22, [x16], 8
+          DUP     d22, v22.d[1]
+          STR     d20,  [x6], 8
+          DUP     d20, v20.d[1]
+        $else:
+          STR     d20,  [x6], 8
+          DUP     d20, v20.d[1]
+          STR     d22, [x16], 8
+          DUP     d22, v22.d[1]
+          STR     d24, [x17], 8
+          DUP     d24, v24.d[1]
+          STR     d26, [x14], 8
+          DUP     d26, v26.d[1]
+          STR     d28, [x13], 8
+          DUP     d28, v28.d[1]
+          STR     d30,  [x7], 8
+          DUP     d30, v30.d[1]
+
+7:
+        TBZ     x1, 1, 8f
+        $if INC:
+          STR     s30,  [x7], 4
+          DUP     s30, v30.s[1]
+          STR     s28, [x13], 4
+          DUP     s28, v28.s[1]
+          STR     s26, [x14], 4
+          DUP     s26, v26.s[1]
+          STR     s24, [x17], 4
+          DUP     s24, v24.s[1]
+          STR     s22, [x16], 4
+          DUP     s22, v22.s[1]
+          STR     s20,  [x6], 4
+          DUP     s20, v20.s[1]
+        $else:
+          STR     s20,  [x6], 4
+          DUP     s20, v20.s[1]
+          STR     s22, [x16], 4
+          DUP     s22, v22.s[1]
+          STR     s24, [x17], 4
+          DUP     s24, v24.s[1]
+          STR     s26, [x14], 4
+          DUP     s26, v26.s[1]
+          STR     s28, [x13], 4
+          DUP     s28, v28.s[1]
+          STR     s30,  [x7], 4
+          DUP     s30, v30.s[1]
+
+8:
+        TBZ     x1, 0, 9f
+        $if INC:
+          STR     h30,  [x7]
+          STR     h28, [x13]
+          STR     h26, [x14]
+          STR     h24, [x17]
+          STR     h22, [x16]
+          STR     h20,  [x6]
+        $else:
+          STR     h20,  [x6]
+          STR     h22, [x16]
+          STR     h24, [x17]
+          STR     h26, [x14]
+          STR     h28, [x13]
+          STR     h30,  [x7]
+9:
+        RET
+
+END_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif
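
Each iteration of the main loop in the template above consumes 2 half-floats of K per
A row: 6 rows x 2 B vectors (16 columns) x 2 K steps = 24 FMLA, interleaved with
6 32-bit A loads and 4 128-bit B loads, which is what the "24 FMA + 6 ld32 A + 4 LDR B"
comment counts. As an illustration only (not part of this change), here is a scalar C
sketch of what one 16-column panel computes, assuming the usual XNNPACK packing of w as
16 half-float biases followed by one 16-wide weight row per half-float of K; the
assembly accumulates in half precision and also handles the nc loop, K remainders,
odd-width stores, and the GEMMINC accumulator path, all of which this sketch omits:

  #include <stddef.h>

  // Hypothetical scalar reference for one 6x16 output tile (AArch64 __fp16).
  // kc and all strides are in bytes, matching the micro-kernel's arguments.
  static void f16_gemm_6x16_tile_reference(
      size_t mr, size_t kc,
      const __fp16* a, size_t a_stride,
      const __fp16* w,                   // [bias[16], then 16 weights per K step]
      __fp16* c, size_t cm_stride,
      float scale, float out_min, float out_max)
  {
    const size_t k = kc / sizeof(__fp16);
    for (size_t i = 0; i < mr; i++) {
      const __fp16* a_row = (const __fp16*) ((const char*) a + i * a_stride);
      __fp16* c_row = (__fp16*) ((char*) c + i * cm_stride);
      for (size_t j = 0; j < 16; j++) {
        float acc = (float) w[j];                        // bias
        for (size_t kk = 0; kk < k; kk++) {
          acc += (float) a_row[kk] * (float) w[16 + kk * 16 + j];
        }
        acc *= scale;                                    // params scale (FMUL)
        if (acc < out_min) acc = out_min;                // FMAX
        if (acc > out_max) acc = out_max;                // FMIN
        c_row[j] = (__fp16) acc;
      }
    }
  }
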
diff --git a/src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S b/src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S
new file mode 100644
index 0000000..60ab6de
--- /dev/null
+++ b/src/f16-gemm/gen-inc/6x16inc-minmax-aarch64-neonfp16arith-cortex-a55.S
@@ -0,0 +1,351 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_f16_gemminc_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55(
+#     size_t mr,                x0
+#     size_t nc,                x1
+#     size_t kc,                x2 / x0
+#     const uint8_t*restrict a, x3
+#     size_t a_stride,          x4
+#     const void*restrict w,    x5
+#     uint8_t*restrict c,       x6
+#     size_t cm_stride,         x7
+#     size_t cn_stride,         [sp] -> (x0)
+#     const float*restrict acc,  [sp + 8] -> x15
+#     const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 16] -> x8
+
+# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
+
+# A pointers
+#  x3 a0
+#  x9 a1
+# x10 a2
+# x11 a3
+# x12 a4
+#  x4 a5
+
+# C pointers
+#  x6 c0
+# x16 c1
+# x17 c2
+# x14 c3
+# x13 c4
+#  x7 c5
+
+# Vector register usage
+# A0   v0
+# A1   v1
+# A2   v2
+# A3   v3
+# A4   v4
+# A5   v5
+# B   v16 v17 v18 v19
+# C   v20 v21
+# C   v22 v23
+# C   v24 v25
+# C   v26 v27
+# C   v28 v29
+# C   v30 v31
+# Clamp v6, (v4), (v5)
+# unused A   v8 v9 v10 v11
+# unused B   v12 v13 v14 v15
+
+
+BEGIN_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+        # Load acc, params pointer
+        LDP     x15, x8, [sp, 8]
+
+        # Clamp A and C pointers
+        CMP     x0, 2                   // if mr < 2
+        ADD     x9, x3, x4              // a1 = a0 + a_stride
+        ADD     x16, x6, x7             // c1 = c0 + cm_stride
+        CSEL    x9, x3, x9, LO          //   a1 = a0
+        CSEL    x16, x6, x16, LO        //   c1 = c0
+
+        ADD     x10, x9, x4             // a2 = a1 + a_stride
+        ADD     x17, x16, x7            // c2 = c1 + cm_stride
+                                        // if mr <= 2
+        CSEL    x10, x9, x10, LS        //   a2 = a1
+        CSEL    x17, x16, x17, LS       //   c2 = c1
+
+        CMP     x0, 4                   // if mr < 4
+        ADD     x11, x10, x4            // a3 = a2 + a_stride
+        ADD     x14, x17, x7            // c3 = c2 + cm_stride
+        CSEL    x11, x10, x11, LO       //   a3 = a2
+        CSEL    x14, x17, x14, LO       //   c3 = c2
+
+        ADD     x12, x11, x4            // a4 = a3 + a_stride
+        ADD     x13, x14, x7            // c4 = c3 + cm_stride
+                                        // if mr <= 4
+        CSEL    x12, x11, x12, LS       //   a4 = a3
+        CSEL    x13, x14, x13, LS       //   c4 = c3
+
+        CMP     x0, 6                   // if mr < 6
+        ADD     x4, x12, x4             // a5 = a4 + a_stride
+        ADD     x7, x13, x7             // c5 = c4 + cm_stride
+        CSEL    x4, x12, x4, LO         //   a5 = a4
+        CSEL    x7, x13, x7, LO         //   c5 = c4
+
+        # Load params scale value
+        LD1R    {v6.8h}, [x8]
+        ADD     x8, x8, 2
+
+0:
+        # Load initial accumulators
+        LDP     q20, q21, [x15], 32
+        LDP     q22, q23, [x15], 32
+        LDP     q24, q25, [x15], 32
+        LDP     q26, q27, [x15], 32
+        LDP     q28, q29, [x15], 32
+        LDP     q30, q31, [x15], 32
+
+        # Are there at least 2 halffloats (4 bytes)?
+        SUBS    x0, x2, 4               // k = kc - 4
+        B.LO    4f
+
+        # Prologue - load 6 A and 2 B
+
+        LDR     s0,  [x3], 4
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     s1,  [x9], 4
+        LDR     s2, [x10], 4
+        LDR     s3, [x11], 4
+        LDR     s4, [x12], 4
+        LDR     s5,  [x4], 4
+
+        # Are there at least 2 halffloats for the main loop?
+        SUBS    x0, x0, 4
+        B.LO    2f
+
+        # Main loop - 2 halffloats of A (4 bytes)
+        # 24 FMA + 6 ld32 A + 4 LDR B
+1:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        LDR     q18, [x5], 16
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        LDR     q19, [x5], 16
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        SUBS    x0, x0, 4
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        LDR     q16, [x5], 16
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        LDR     q17, [x5], 16
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        LDR     s0,  [x3], 4
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        LDR     s1,  [x9], 4
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        LDR     s2, [x10], 4
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        LDR     s3, [x11], 4
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        LDR     s4, [x12], 4
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+        LDR     s5,  [x4], 4
+        B.HS    1b
+
+        # Epilogue - same as main loop but no loads for next loop
+2:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        LDR     q18, [x5], 16
+        LDR     q19, [x5], 16
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+
+        # Is there a remainder? - 1 halffloat of A (2 bytes)
+        TBNZ    x0, 1, 4f
+3:
+        # Scale and Clamp
+        FMUL    v20.8h, v20.8h, v6.8h
+        # Load params values
+        LD2R    {v4.8h, v5.8h}, [x8]
+        FMUL    v21.8h, v21.8h, v6.8h
+        FMUL    v22.8h, v22.8h, v6.8h
+        FMUL    v23.8h, v23.8h, v6.8h
+        FMUL    v24.8h, v24.8h, v6.8h
+        FMUL    v25.8h, v25.8h, v6.8h
+        FMUL    v26.8h, v26.8h, v6.8h
+        FMUL    v27.8h, v27.8h, v6.8h
+        FMUL    v28.8h, v28.8h, v6.8h
+        FMUL    v29.8h, v29.8h, v6.8h
+        FMUL    v30.8h, v30.8h, v6.8h
+        FMUL    v31.8h, v31.8h, v6.8h
+        # Load cn_stride
+        LDR     x0, [sp, 0]
+        FMAX    v20.8h, v20.8h, v4.8h
+        FMAX    v21.8h, v21.8h, v4.8h
+        FMAX    v22.8h, v22.8h, v4.8h
+        FMAX    v23.8h, v23.8h, v4.8h
+        FMAX    v24.8h, v24.8h, v4.8h
+        FMAX    v25.8h, v25.8h, v4.8h
+        FMAX    v26.8h, v26.8h, v4.8h
+        FMAX    v27.8h, v27.8h, v4.8h
+        FMAX    v28.8h, v28.8h, v4.8h
+        FMAX    v29.8h, v29.8h, v4.8h
+        FMAX    v30.8h, v30.8h, v4.8h
+        FMAX    v31.8h, v31.8h, v4.8h
+        SUBS    x1, x1, 16
+        FMIN    v20.8h, v20.8h, v5.8h
+        FMIN    v21.8h, v21.8h, v5.8h
+        FMIN    v22.8h, v22.8h, v5.8h
+        FMIN    v23.8h, v23.8h, v5.8h
+        FMIN    v24.8h, v24.8h, v5.8h
+        FMIN    v25.8h, v25.8h, v5.8h
+        FMIN    v26.8h, v26.8h, v5.8h
+        FMIN    v27.8h, v27.8h, v5.8h
+        FMIN    v28.8h, v28.8h, v5.8h
+        FMIN    v29.8h, v29.8h, v5.8h
+        FMIN    v30.8h, v30.8h, v5.8h
+        FMIN    v31.8h, v31.8h, v5.8h
+
+        # Store full 6 x 16
+        B.LO    5f
+
+        ST1     {v30.16b, v31.16b},  [x7], x0
+        SUB     x3,  x3, x2             // a0 -= kc
+        ST1     {v28.16b, v29.16b}, [x13], x0
+        SUB     x9,  x9, x2             // a1 -= kc
+        ST1     {v26.16b, v27.16b}, [x14], x0
+        SUB     x10, x10, x2            // a2 -= kc
+        ST1     {v24.16b, v25.16b}, [x17], x0
+        SUB     x11, x11, x2            // a3 -= kc
+        ST1     {v22.16b, v23.16b}, [x16], x0
+        SUB     x12, x12, x2            // a4 -= kc
+        ST1     {v20.16b, v21.16b},  [x6], x0
+        SUB     x4,  x4, x2             // a5 -= kc
+
+        B.HI    0b
+        RET
+
+4:
+        # Remainder - 1 halffloat of A (2 bytes)
+        LDR     h0,  [x3], 2
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     h1,  [x9], 2
+        LDR     h2, [x10], 2
+        LDR     h3, [x11], 2
+        LDR     h4, [x12], 2
+        LDR     h5,  [x4], 2
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+        B       3b
+
+        # Store odd width
+5:
+        TBZ     x1, 3, 6f
+        STR     q30,  [x7], 16
+        MOV     v30.16b, v31.16b
+        STR     q28, [x13], 16
+        MOV     v28.16b, v29.16b
+        STR     q26, [x14], 16
+        MOV     v26.16b, v27.16b
+        STR     q24, [x17], 16
+        MOV     v24.16b, v25.16b
+        STR     q22, [x16], 16
+        MOV     v22.16b, v23.16b
+        STR     q20,  [x6], 16
+        MOV     v20.16b, v21.16b
+
+6:
+        TBZ     x1, 2, 7f
+        STR     d30,  [x7], 8
+        DUP     d30, v30.d[1]
+        STR     d28, [x13], 8
+        DUP     d28, v28.d[1]
+        STR     d26, [x14], 8
+        DUP     d26, v26.d[1]
+        STR     d24, [x17], 8
+        DUP     d24, v24.d[1]
+        STR     d22, [x16], 8
+        DUP     d22, v22.d[1]
+        STR     d20,  [x6], 8
+        DUP     d20, v20.d[1]
+
+7:
+        TBZ     x1, 1, 8f
+        STR     s30,  [x7], 4
+        DUP     s30, v30.s[1]
+        STR     s28, [x13], 4
+        DUP     s28, v28.s[1]
+        STR     s26, [x14], 4
+        DUP     s26, v26.s[1]
+        STR     s24, [x17], 4
+        DUP     s24, v24.s[1]
+        STR     s22, [x16], 4
+        DUP     s22, v22.s[1]
+        STR     s20,  [x6], 4
+        DUP     s20, v20.s[1]
+
+8:
+        TBZ     x1, 0, 9f
+        STR     h30,  [x7]
+        STR     h28, [x13]
+        STR     h26, [x14]
+        STR     h24, [x17]
+        STR     h22, [x16]
+        STR     h20,  [x6]
+9:
+        RET
+
+END_FUNCTION xnn_f16_gemminc_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif
diff --git a/src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S b/src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S
new file mode 100644
index 0000000..b325b02
--- /dev/null
+++ b/src/f16-gemm/gen/6x16-minmax-aarch64-neonfp16arith-cortex-a55.S
@@ -0,0 +1,355 @@
+// Auto-generated file. Do not edit!
+//   Template: src/f16-gemm/6x16-aarch64-neonfp16arith-cortex-a55.S.in
+//   Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55(
+#     size_t mr,                x0
+#     size_t nc,                x1
+#     size_t kc,                x2 / x0
+#     const uint8_t*restrict a, x3
+#     size_t a_stride,          x4
+#     const void*restrict w,    x5
+#     uint8_t*restrict c,       x6
+#     size_t cm_stride,         x7
+#     size_t cn_stride,         [sp] -> (x0)
+#     const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])  [sp + 8] -> x8
+
+# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
+
+# A pointers
+#  x3 a0
+#  x9 a1
+# x10 a2
+# x11 a3
+# x12 a4
+#  x4 a5
+
+# C pointers
+#  x6 c0
+# x16 c1
+# x17 c2
+# x14 c3
+# x13 c4
+#  x7 c5
+
+# Vector register usage
+# A0   v0
+# A1   v1
+# A2   v2
+# A3   v3
+# A4   v4
+# A5   v5
+# B   v16 v17 v18 v19
+# C   v20 v21
+# C   v22 v23
+# C   v24 v25
+# C   v26 v27
+# C   v28 v29
+# C   v30 v31
+# Clamp v6, (v4), (v5)
+# unused A   v8 v9 v10 v11
+# unused B   v12 v13 v14 v15
+
+
+BEGIN_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+        # Load params pointer
+        LDR     x8, [sp, 8]
+
+        # Clamp A and C pointers
+        CMP     x0, 2                   // if mr < 2
+        ADD     x9, x3, x4              // a1 = a0 + a_stride
+        ADD     x16, x6, x7             // c1 = c0 + cm_stride
+        CSEL    x9, x3, x9, LO          //   a1 = a0
+        CSEL    x16, x6, x16, LO        //   c1 = c0
+
+        ADD     x10, x9, x4             // a2 = a1 + a_stride
+        ADD     x17, x16, x7            // c2 = c1 + cm_stride
+                                        // if mr <= 2
+        CSEL    x10, x9, x10, LS        //   a2 = a1
+        CSEL    x17, x16, x17, LS       //   c2 = c1
+
+        CMP     x0, 4                   // if mr < 4
+        ADD     x11, x10, x4            // a3 = a2 + a_stride
+        ADD     x14, x17, x7            // c3 = c2 + cm_stride
+        CSEL    x11, x10, x11, LO       //   a3 = a2
+        CSEL    x14, x17, x14, LO       //   c3 = c2
+
+        ADD     x12, x11, x4            // a4 = a3 + a_stride
+        ADD     x13, x14, x7            // c4 = c3 + cm_stride
+                                        // if mr <= 4
+        CSEL    x12, x11, x12, LS       //   a4 = a3
+        CSEL    x13, x14, x13, LS       //   c4 = c3
+
+        CMP     x0, 6                   // if mr < 6
+        ADD     x4, x12, x4             // a5 = a4 + a_stride
+        ADD     x7, x13, x7             // c5 = c4 + cm_stride
+        CSEL    x4, x12, x4, LO         //   a5 = a4
+        CSEL    x7, x13, x7, LO         //   c5 = c4
+
+        # Load params scale value
+        LD1R    {v6.8h}, [x8]
+        ADD     x8, x8, 2
+
+0:
+        # Load initial bias from w into accumulators
+        LDP     q20, q21, [x5], 32
+        MOV     v22.16b, v20.16b
+        MOV     v23.16b, v21.16b
+        MOV     v24.16b, v20.16b
+        MOV     v25.16b, v21.16b
+        MOV     v26.16b, v20.16b
+        MOV     v27.16b, v21.16b
+        MOV     v28.16b, v20.16b
+        MOV     v29.16b, v21.16b
+        MOV     v30.16b, v20.16b
+        MOV     v31.16b, v21.16b
+
+        # Are there at least 2 halffloats (4 bytes)?
+        SUBS    x0, x2, 4               // k = kc - 4
+        B.LO    4f
+
+        # Prologue - load 6 A and 2 B
+
+        LDR     s0,  [x3], 4
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     s1,  [x9], 4
+        LDR     s2, [x10], 4
+        LDR     s3, [x11], 4
+        LDR     s4, [x12], 4
+        LDR     s5,  [x4], 4
+
+        # Are there at least 2 halffloats for the main loop?
+        SUBS    x0, x0, 4
+        B.LO    2f
+
+        # Main loop - 2 halffloats of A (4 bytes)
+        # 24 FMA + 6 ld32 A + 4 LDR B
+1:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        LDR     q18, [x5], 16
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        LDR     q19, [x5], 16
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        SUBS    x0, x0, 4
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        LDR     q16, [x5], 16
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        LDR     q17, [x5], 16
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        LDR     s0,  [x3], 4
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        LDR     s1,  [x9], 4
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        LDR     s2, [x10], 4
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        LDR     s3, [x11], 4
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        LDR     s4, [x12], 4
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+        LDR     s5,  [x4], 4
+        B.HS    1b
+
+        # Epilogue - same as main loop but no loads for next loop
+2:
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        LDR     q18, [x5], 16
+        LDR     q19, [x5], 16
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+
+        FMLA    v20.8h, v18.8h,  v0.h[1]
+        FMLA    v22.8h, v18.8h,  v1.h[1]
+        FMLA    v24.8h, v18.8h,  v2.h[1]
+        FMLA    v26.8h, v18.8h,  v3.h[1]
+        FMLA    v28.8h, v18.8h,  v4.h[1]
+        FMLA    v30.8h, v18.8h,  v5.h[1]
+        FMLA    v21.8h, v19.8h,  v0.h[1]
+        FMLA    v23.8h, v19.8h,  v1.h[1]
+        FMLA    v25.8h, v19.8h,  v2.h[1]
+        FMLA    v27.8h, v19.8h,  v3.h[1]
+        FMLA    v29.8h, v19.8h,  v4.h[1]
+        FMLA    v31.8h, v19.8h,  v5.h[1]
+
+        # Is there a remainder? - 1 halffloat of A (2 bytes)
+        TBNZ    x0, 1, 4f
+3:
+        # Scale and Clamp
+        FMUL    v20.8h, v20.8h, v6.8h
+        # Load params values
+        LD2R    {v4.8h, v5.8h}, [x8]
+        FMUL    v21.8h, v21.8h, v6.8h
+        FMUL    v22.8h, v22.8h, v6.8h
+        FMUL    v23.8h, v23.8h, v6.8h
+        FMUL    v24.8h, v24.8h, v6.8h
+        FMUL    v25.8h, v25.8h, v6.8h
+        FMUL    v26.8h, v26.8h, v6.8h
+        FMUL    v27.8h, v27.8h, v6.8h
+        FMUL    v28.8h, v28.8h, v6.8h
+        FMUL    v29.8h, v29.8h, v6.8h
+        FMUL    v30.8h, v30.8h, v6.8h
+        FMUL    v31.8h, v31.8h, v6.8h
+        # Load cn_stride
+        LDR     x0, [sp, 0]
+        FMAX    v20.8h, v20.8h, v4.8h
+        FMAX    v21.8h, v21.8h, v4.8h
+        FMAX    v22.8h, v22.8h, v4.8h
+        FMAX    v23.8h, v23.8h, v4.8h
+        FMAX    v24.8h, v24.8h, v4.8h
+        FMAX    v25.8h, v25.8h, v4.8h
+        FMAX    v26.8h, v26.8h, v4.8h
+        FMAX    v27.8h, v27.8h, v4.8h
+        FMAX    v28.8h, v28.8h, v4.8h
+        FMAX    v29.8h, v29.8h, v4.8h
+        FMAX    v30.8h, v30.8h, v4.8h
+        FMAX    v31.8h, v31.8h, v4.8h
+        SUBS    x1, x1, 16
+        FMIN    v20.8h, v20.8h, v5.8h
+        FMIN    v21.8h, v21.8h, v5.8h
+        FMIN    v22.8h, v22.8h, v5.8h
+        FMIN    v23.8h, v23.8h, v5.8h
+        FMIN    v24.8h, v24.8h, v5.8h
+        FMIN    v25.8h, v25.8h, v5.8h
+        FMIN    v26.8h, v26.8h, v5.8h
+        FMIN    v27.8h, v27.8h, v5.8h
+        FMIN    v28.8h, v28.8h, v5.8h
+        FMIN    v29.8h, v29.8h, v5.8h
+        FMIN    v30.8h, v30.8h, v5.8h
+        FMIN    v31.8h, v31.8h, v5.8h
+
+        # Store full 6 x 16
+        B.LO    5f
+
+        ST1     {v20.16b, v21.16b},  [x6], x0
+        SUB     x3,  x3, x2             // a0 -= kc
+        ST1     {v22.16b, v23.16b}, [x16], x0
+        SUB     x9,  x9, x2             // a1 -= kc
+        ST1     {v24.16b, v25.16b}, [x17], x0
+        SUB     x10, x10, x2            // a2 -= kc
+        ST1     {v26.16b, v27.16b}, [x14], x0
+        SUB     x11, x11, x2            // a3 -= kc
+        ST1     {v28.16b, v29.16b}, [x13], x0
+        SUB     x12, x12, x2            // a4 -= kc
+        ST1     {v30.16b, v31.16b},  [x7], x0
+        SUB     x4,  x4, x2             // a5 -= kc
+
+        B.HI    0b
+        RET
+
+4:
+        # Remainder - 1 halffloat of A (2 bytes)
+        LDR     h0,  [x3], 2
+        LDR     q16, [x5], 16
+        LDR     q17, [x5], 16
+        LDR     h1,  [x9], 2
+        LDR     h2, [x10], 2
+        LDR     h3, [x11], 2
+        LDR     h4, [x12], 2
+        LDR     h5,  [x4], 2
+        FMLA    v20.8h, v16.8h,  v0.h[0]
+        FMLA    v22.8h, v16.8h,  v1.h[0]
+        FMLA    v24.8h, v16.8h,  v2.h[0]
+        FMLA    v26.8h, v16.8h,  v3.h[0]
+        FMLA    v28.8h, v16.8h,  v4.h[0]
+        FMLA    v30.8h, v16.8h,  v5.h[0]
+        FMLA    v21.8h, v17.8h,  v0.h[0]
+        FMLA    v23.8h, v17.8h,  v1.h[0]
+        FMLA    v25.8h, v17.8h,  v2.h[0]
+        FMLA    v27.8h, v17.8h,  v3.h[0]
+        FMLA    v29.8h, v17.8h,  v4.h[0]
+        FMLA    v31.8h, v17.8h,  v5.h[0]
+        B       3b
+
+        # Store odd width
+5:
+        TBZ     x1, 3, 6f
+        STR     q20,  [x6], 16
+        MOV     v20.16b, v21.16b
+        STR     q22, [x16], 16
+        MOV     v22.16b, v23.16b
+        STR     q24, [x17], 16
+        MOV     v24.16b, v25.16b
+        STR     q26, [x14], 16
+        MOV     v26.16b, v27.16b
+        STR     q28, [x13], 16
+        MOV     v28.16b, v29.16b
+        STR     q30,  [x7], 16
+        MOV     v30.16b, v31.16b
+
+6:
+        TBZ     x1, 2, 7f
+        STR     d20,  [x6], 8
+        DUP     d20, v20.d[1]
+        STR     d22, [x16], 8
+        DUP     d22, v22.d[1]
+        STR     d24, [x17], 8
+        DUP     d24, v24.d[1]
+        STR     d26, [x14], 8
+        DUP     d26, v26.d[1]
+        STR     d28, [x13], 8
+        DUP     d28, v28.d[1]
+        STR     d30,  [x7], 8
+        DUP     d30, v30.d[1]
+
+7:
+        TBZ     x1, 1, 8f
+        STR     s20,  [x6], 4
+        DUP     s20, v20.s[1]
+        STR     s22, [x16], 4
+        DUP     s22, v22.s[1]
+        STR     s24, [x17], 4
+        DUP     s24, v24.s[1]
+        STR     s26, [x14], 4
+        DUP     s26, v26.s[1]
+        STR     s28, [x13], 4
+        DUP     s28, v28.s[1]
+        STR     s30,  [x7], 4
+        DUP     s30, v30.s[1]
+
+8:
+        TBZ     x1, 0, 9f
+        STR     h20,  [x6]
+        STR     h22, [x16]
+        STR     h24, [x17]
+        STR     h26, [x14]
+        STR     h28, [x13]
+        STR     h30,  [x7]
+9:
+        RET
+
+END_FUNCTION xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index ce4f81a..a53ee2d 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -470,6 +470,7 @@
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_1x16__aarch64_neonfp16arith_ld32)
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_4x16__aarch64_neonfp16arith_ld32)
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32)
+DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55)
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_1x8__aarch64_neonfp16arith_ld64)
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_4x8__aarch64_neonfp16arith_ld64)
 DECLARE_F16_GEMM_MINMAX_UKERNEL_FUNCTION(xnn_f16_gemm_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64)
diff --git a/test/f16-gemm-minmax.cc b/test/f16-gemm-minmax.cc
index 8acd2ab..a942ca4 100644
--- a/test/f16-gemm-minmax.cc
+++ b/test/f16-gemm-minmax.cc
@@ -23,6 +23,462 @@
 
 
 #if XNN_ARCH_ARM64
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_eq_2) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, strided_cn) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .cn_stride(19)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_eq_2_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .a_stride(5)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_eq_2_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t m = 1; m <= 6; m++) {
+      for (uint32_t n = 1; n <= 16; n++) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(m)
+          .n(n)
+          .k(2)
+          .iterations(1)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_eq_2_subtile_m) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t m = 1; m <= 6; m++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(m)
+        .n(16)
+        .k(2)
+        .iterations(1)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_eq_2_subtile_n) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 1; n <= 16; n++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(n)
+        .k(2)
+        .iterations(1)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_lt_2) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 1; k < 2; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_lt_2_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 1; k < 2; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(5)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_lt_2_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 1; k < 2; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_gt_2) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 3; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_gt_2_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 3; k < 4; k++) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(7)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_gt_2_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 3; k < 4; k++) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_div_2) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 4; k <= 20; k += 2) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_div_2_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 4; k <= 20; k += 2) {
+      GemmMicrokernelTester()
+        .mr(6)
+        .nr(16)
+        .kr(1)
+        .sr(1)
+        .m(6)
+        .n(16)
+        .k(k)
+        .a_stride(23)
+        .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, k_div_2_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 4; k <= 20; k += 2) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_gt_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_gt_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_gt_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(13)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_gt_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 17; n < 32; n++) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_div_16) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_div_16_strided_cn) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .cn_stride(19)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_div_16_strided_a) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        GemmMicrokernelTester()
+          .mr(6)
+          .nr(16)
+          .kr(1)
+          .sr(1)
+          .m(6)
+          .n(n)
+          .k(k)
+          .a_stride(13)
+          .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, n_div_16_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (uint32_t n = 32; n <= 48; n += 16) {
+      for (size_t k = 1; k <= 10; k += 3) {
+        for (uint32_t m = 1; m <= 6; m++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, strided_cm_subtile) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    for (size_t k = 1; k <= 10; k += 3) {
+      for (uint32_t m = 1; m <= 6; m++) {
+        for (uint32_t n = 1; n <= 16; n++) {
+          GemmMicrokernelTester()
+            .mr(6)
+            .nr(16)
+            .kr(1)
+            .sr(1)
+            .m(m)
+            .n(n)
+            .k(k)
+            .cm_stride(19)
+            .iterations(1)
+            .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+        }
+      }
+    }
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, qmin) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .qmin(128)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, qmax) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .qmax(128)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+
+  TEST(F16_GEMM_MINMAX_6X16__AARCH64_NEONFP16ARITH_CORTEX_A55, strided_cm) {
+    TEST_REQUIRES_ARM_NEON_FP16_ARITH;
+    GemmMicrokernelTester()
+      .mr(6)
+      .nr(16)
+      .kr(1)
+      .sr(1)
+      .m(6)
+      .n(16)
+      .k(2)
+      .cm_stride(19)
+      .Test(xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55, xnn_init_f16_scaleminmax_params);
+  }
+#endif  // XNN_ARCH_ARM64
+
+
+#if XNN_ARCH_ARM64
   TEST(F16_GEMM_MINMAX_1X8__NEONFP16ARITH_LD64, k_eq_4) {
     TEST_REQUIRES_ARM_NEON_FP16_ARITH;
     GemmMicrokernelTester()
diff --git a/test/f16-gemm-minmax.yaml b/test/f16-gemm-minmax.yaml
index 67a8547..e51dd14 100644
--- a/test/f16-gemm-minmax.yaml
+++ b/test/f16-gemm-minmax.yaml
@@ -2,6 +2,11 @@
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
+- name: xnn_f16_gemm_minmax_ukernel_6x16__aarch64_neonfp16arith_cortex_a55
+  init: xnn_init_f16_scaleminmax_params
+  k-block: 2
+  arch:
+  - aarch64
 - name: xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64
   init: xnn_init_f16_scaleminmax_params
   k-block: 4