FP16 4x8, 6x8 and 1x8 GEMM ld64 microkernels

PiperOrigin-RevId: 306697529
diff --git a/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in b/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
new file mode 100644
index 0000000..e3ae634
--- /dev/null
+++ b/src/f16-gemm/6x8-aarch64-neonfp16arith-ld64.S.in
@@ -0,0 +1,378 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64(
+#     size_t mr,                x0
+#     size_t nc,                x1
+#     size_t kc,                x2 / x0
+#     const uint8_t*restrict a, x3
+#     size_t a_stride,          x4
+#     const void*restrict w,    x5
+#     uint8_t*restrict c,       x6
+#     size_t cm_stride,         x7
+#     size_t cn_stride,         [sp] -> x14
+$if INC:
+  #     const void*restrict acc,   [sp + 8] -> x15
+  #     const union xnn_f16_output_params params[restrict static 1])  [sp + 16] -> x8
+$else:
+  #     const union xnn_f16_output_params params[restrict static 1])  [sp + 8] -> x8
+
+# d8-d15 need to be preserved if used.
+# x19-x30 need to be preserved if used.
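+
+# Overview: one pass of the outer loop below produces a 6-row x 8-column tile
+# of C. Accumulators start from the packed bias in w (or from acc in the INC
+# variant), gather a[m][k] * w[k][n] over kc halffloats, and are then scaled
+# and clamped with the params values. Rough sketch, assuming (as the loads
+# below suggest) that w packs 8 bias halffloats followed by kc x 8 weights
+# per column tile:
+#
+#   for each 8-column tile:                     // label 0
+#     acc[m][0..7]  = bias[0..7]                // or incoming acc[] if INC
+#     for (k = 0; k < kc halffloats; k++)       // main loop, unrolled by 4
+#       acc[m][0..7] += a[m][k] * w[k][0..7]
+#     c[m][0..7] = min(max(acc[m][0..7] * scale, min), max)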
+
+# A pointers
+#  x3 a0
+#  x9 a1
+# x10 a2
+# x11 a3
+# x12 a4
+#  x4 a5
+
+# C pointers
+#  x6 c0
+# x16 c1
+# x17 c2
+# x18 c3
+# x13 c4
+#  x7 c5
+
+# Vector register usage
+# A0   v0
+# A1   v1
+# A2   v2
+# A3   v3
+# A4   v4
+# A5   v5
+# B   v16 v17 v18 v19
+# C   v20
+# C   v22
+# C   v24
+# C   v26
+# C   v28
+# C   v30
+# Scale v6
+# Clamp (v4) (v5) - loaded at the scale/clamp step, after A4/A5 are last used
+# unused A   v8 v9 v10 v11
+# unused B   v12 v13 v14 v15
+
+BEGIN_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64
+        # Clamp A and C pointers
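+        # Rows beyond mr reuse the previous row's pointers, so out-of-range
+        # rows recompute (and re-store) the last valid row's data in bounds.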
+        CMP x0, 2                // if mr < 2
+        ADD x9, x3, x4           // a1 = a0 + a_stride
+        ADD x16, x6, x7          // c1 = c0 + cm_stride
+        CSEL x9, x3, x9, LO      //   a1 = a0
+        CSEL x16, x6, x16, LO    //   c1 = c0
+
+        ADD x10, x9, x4          // a2 = a1 + a_stride
+        ADD x17, x16, x7         // c2 = c1 + cm_stride
+                                 // if mr <= 2
+        CSEL x10, x9, x10, LS    //   a2 = a1
+        CSEL x17, x16, x17, LS   //   c2 = c1
+
+        CMP x0, 4                // if mr < 4
+        ADD x11, x10, x4         // a3 = a2 + a_stride
+        ADD x18, x17, x7         // c3 = c2 + cm_stride
+        CSEL x11, x10, x11, LO   //   a3 = a2
+        CSEL x18, x17, x18, LO   //   c3 = c2
+
+        ADD x12, x11, x4         // a4 = a3 + a_stride
+        ADD x13, x18, x7         // c4 = c3 + cm_stride
+                                 // if mr <= 4
+        CSEL x12, x11, x12, LS   //   a4 = a3
+        CSEL x13, x18, x13, LS   //   c4 = c3
+
+        $if INC:
+          # Load acc, params pointer
+          LDP x15, x8, [sp, 8]
+        $else:
+          # Load params pointer
+          LDR x8, [sp, 8]
+
+        CMP x0, 6                // if mr < 6
+        ADD x4, x12, x4          // a5 = a4 + a_stride
+        ADD x7, x13, x7          // c5 = c4 + cm_stride
+        CSEL x4, x12, x4, LO     //   a5 = a4
+        CSEL x7, x13, x7, LO     //   c5 = c4
+
+        # Load params scale value
+        LD1R {v6.8h}, [x8]
+        ADD x8, x8, 2
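+        # x8 now points at the min/max pair, loaded just before clamping below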
+
+        # Load cn_stride
+        LDR x14, [sp]
+
+0:
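+        # Top of the nc loop: each pass produces one 6 x 8 column tile of C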
+        $if INC:
+          # Load initial accumulators
+          LDP q20, q22, [x15], 32
+          LDP q24, q26, [x15], 32
+          LDP q28, q30, [x15], 32
+          #PRFM PLDL1KEEP, [x5, 0]  // Prefetch B
+          #PRFM PLDL1KEEP, [x5, 64]
+          #PRFM PLDL1KEEP, [x5, 128]
+          #PRFM PLDL1KEEP, [x5, 192]
+          #PRFM PLDL1KEEP,  [x3]    // Prefetch A
+          #PRFM PLDL1KEEP,  [x9]
+          #PRFM PLDL1KEEP, [x10]
+          #PRFM PLDL1KEEP, [x11]
+          #PRFM PLDL1KEEP, [x12]
+          #PRFM PLDL1KEEP,  [x4]
+        $else:
+          # Load initial bias from w into accumulators
+          LDR q20, [x5], 16
+          MOV v22.16b, v20.16b
+          #PRFM PLDL1KEEP, [x5, 0]  // Prefetch B
+          #PRFM PLDL1KEEP, [x5, 64]
+          MOV v24.16b, v20.16b
+          #PRFM PLDL1KEEP, [x5, 128]
+          #PRFM PLDL1KEEP, [x5, 192]
+          MOV v26.16b, v20.16b
+          #PRFM PLDL1KEEP,  [x3]    // Prefetch A
+          #PRFM PLDL1KEEP,  [x9]
+          MOV v28.16b, v20.16b
+          #PRFM PLDL1KEEP, [x10]
+          #PRFM PLDL1KEEP, [x11]
+          MOV v30.16b, v20.16b
+          #PRFM PLDL1KEEP, [x12]
+          #PRFM PLDL1KEEP,  [x4]
+
+        # Are there at least 4 halffloats (8 bytes)?
+        SUBS x0, x2, 8  // k = kc - 8
+        B.LO 5f
+
+        # Main loop - 4 halffloats of A (8 bytes)
+        # 24 FMA + 6 ld64 A + 2 LDP B
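+        # Each iteration loads 4 halffloats per A row (one 64-bit load) and
+        # 4 rows of B; each FMLA broadcasts one A lane against a full B row.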
+1:
+        LDR   d0,  [x3], 8
+        LDP  q16,  q17, [x5], 32
+        LDR   d1,  [x9], 8
+        LDR   d2, [x10], 8
+        LDR   d3, [x11], 8
+        LDR   d4, [x12], 8
+        LDR   d5,  [x4], 8
+
+        FMLA v20.8h, v16.8h,  v0.h[0]
+        FMLA v22.8h, v16.8h,  v1.h[0]
+        FMLA v24.8h, v16.8h,  v2.h[0]
+        FMLA v26.8h, v16.8h,  v3.h[0]
+        FMLA v28.8h, v16.8h,  v4.h[0]
+        FMLA v30.8h, v16.8h,  v5.h[0]
+        LDP  q18,  q19, [x5], 32
+
+        FMLA v20.8h, v17.8h,  v0.h[1]
+        FMLA v22.8h, v17.8h,  v1.h[1]
+        FMLA v24.8h, v17.8h,  v2.h[1]
+        FMLA v26.8h, v17.8h,  v3.h[1]
+        FMLA v28.8h, v17.8h,  v4.h[1]
+        FMLA v30.8h, v17.8h,  v5.h[1]
+
+        FMLA v20.8h, v18.8h,  v0.h[2]
+        FMLA v22.8h, v18.8h,  v1.h[2]
+        FMLA v24.8h, v18.8h,  v2.h[2]
+        FMLA v26.8h, v18.8h,  v3.h[2]
+        FMLA v28.8h, v18.8h,  v4.h[2]
+        FMLA v30.8h, v18.8h,  v5.h[2]
+        SUBS x0, x0, 8
+
+        FMLA v20.8h, v19.8h,  v0.h[3]
+        FMLA v22.8h, v19.8h,  v1.h[3]
+        FMLA v24.8h, v19.8h,  v2.h[3]
+        FMLA v26.8h, v19.8h,  v3.h[3]
+        FMLA v28.8h, v19.8h,  v4.h[3]
+        FMLA v30.8h, v19.8h,  v5.h[3]
+        B.HS 1b
+
+        # Is there a remainder? - 2 halffloats of A (4 bytes)
+        TBNZ x0, 2, 6f
+        # Is there a remainder? - 1 halffloat of A (2 bytes)
+        TBNZ x0, 1, 7f
+4:
+        # Scale and Clamp
+        FMUL v20.8h, v20.8h, v6.8h
+        # Load params values
+        LD2R {v4.8h, v5.8h}, [x8]
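+        # v4 = min, v5 = max (stored in params right after the scale)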
+        FMUL v22.8h, v22.8h, v6.8h
+        FMUL v24.8h, v24.8h, v6.8h
+        FMUL v26.8h, v26.8h, v6.8h
+        FMUL v28.8h, v28.8h, v6.8h
+        FMUL v30.8h, v30.8h, v6.8h
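+        # nc -= 8; flags select full-tile stores below vs. the partial store at 8f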
+        SUBS x1, x1, 8
+        FMAX v20.8h, v20.8h, v4.8h
+        FMAX v22.8h, v22.8h, v4.8h
+        FMAX v24.8h, v24.8h, v4.8h
+        FMAX v26.8h, v26.8h, v4.8h
+        FMAX v28.8h, v28.8h, v4.8h
+        FMAX v30.8h, v30.8h, v4.8h
+        FMIN v20.8h, v20.8h, v5.8h
+        FMIN v22.8h, v22.8h, v5.8h
+        FMIN v24.8h, v24.8h, v5.8h
+        FMIN v26.8h, v26.8h, v5.8h
+        FMIN v28.8h, v28.8h, v5.8h
+        FMIN v30.8h, v30.8h, v5.8h
+
+        # Store full 6 x 8
+        B.LO 8f
+
+        $if INC:
+          ST1 {v30.16b},  [x7], x14
+          SUB  x3,  x3, x2 // a0 -= kc
+          ST1 {v28.16b}, [x13], x14
+          SUB  x9,  x9, x2 // a1 -= kc
+          ST1 {v26.16b}, [x18], x14
+          SUB x10, x10, x2 // a2 -= kc
+          ST1 {v24.16b}, [x17], x14
+          SUB x11, x11, x2 // a3 -= kc
+          ST1 {v22.16b}, [x16], x14
+          SUB x12, x12, x2 // a4 -= kc
+          ST1 {v20.16b},  [x6], x14
+          SUB  x4,  x4, x2 // a5 -= kc
+        $else:
+          ST1 {v20.16b},  [x6], x14
+          SUB  x3,  x3, x2 // a0 -= kc
+          ST1 {v22.16b}, [x16], x14
+          SUB  x9,  x9, x2 // a1 -= kc
+          ST1 {v24.16b}, [x17], x14
+          SUB x10, x10, x2 // a2 -= kc
+          ST1 {v26.16b}, [x18], x14
+          SUB x11, x11, x2 // a3 -= kc
+          ST1 {v28.16b}, [x13], x14
+          SUB x12, x12, x2 // a4 -= kc
+          ST1 {v30.16b},  [x7], x14
+          SUB  x4,  x4, x2 // a5 -= kc
+
+        B.HI 0b
+        RET
+
+5:
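+        # kc was less than 4 halffloats; dispatch on the remainder bits of k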
+        TBZ x0, 2, 7f
+6:
+        # Remainder - 2 halffloats of A (4 bytes)
+        LDR   s0,  [x3], 4
+        LDP  q16,  q17, [x5], 32
+        LDR   s1,  [x9], 4
+        LDR   s2, [x10], 4
+        LDR   s3, [x11], 4
+        LDR   s4, [x12], 4
+        LDR   s5,  [x4], 4
+
+        FMLA v20.8h, v16.8h,  v0.h[0]
+        FMLA v22.8h, v16.8h,  v1.h[0]
+        FMLA v24.8h, v16.8h,  v2.h[0]
+        FMLA v26.8h, v16.8h,  v3.h[0]
+        FMLA v28.8h, v16.8h,  v4.h[0]
+        FMLA v30.8h, v16.8h,  v5.h[0]
+
+        FMLA v20.8h, v17.8h,  v0.h[1]
+        FMLA v22.8h, v17.8h,  v1.h[1]
+        FMLA v24.8h, v17.8h,  v2.h[1]
+        FMLA v26.8h, v17.8h,  v3.h[1]
+        FMLA v28.8h, v17.8h,  v4.h[1]
+        FMLA v30.8h, v17.8h,  v5.h[1]
+
+        TBZ x0, 1, 4b
+
+7:
+        # Remainder - 1 halffloat of A (2 bytes)
+        LDR   h0,  [x3], 2
+        LDR  q16,  [x5], 16
+        LDR   h1,  [x9], 2
+        LDR   h2, [x10], 2
+        LDR   h3, [x11], 2
+        LDR   h4, [x12], 2
+        LDR   h5,  [x4], 2
+        FMLA v20.8h, v16.8h,  v0.h[0]
+        FMLA v22.8h, v16.8h,  v1.h[0]
+        FMLA v24.8h, v16.8h,  v2.h[0]
+        FMLA v26.8h, v16.8h,  v3.h[0]
+        FMLA v28.8h, v16.8h,  v4.h[0]
+        FMLA v30.8h, v16.8h,  v5.h[0]
+        B 4b
+
+        # Store odd width
+8:
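+        # Store remaining 4 columns if bit 2 of nc is set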
+        TBZ x1, 2, 9f
+        $if INC:
+          STR d30,  [x7], 8
+          DUP d30, v30.d[1]
+          STR d28, [x13], 8
+          DUP d28, v28.d[1]
+          STR d26, [x18], 8
+          DUP d26, v26.d[1]
+          STR d24, [x17], 8
+          DUP d24, v24.d[1]
+          STR d22, [x16], 8
+          DUP d22, v22.d[1]
+          STR d20,  [x6], 8
+          DUP d20, v20.d[1]
+        $else:
+          STR d20,  [x6], 8
+          DUP d20, v20.d[1]
+          STR d22, [x16], 8
+          DUP d22, v22.d[1]
+          STR d24, [x17], 8
+          DUP d24, v24.d[1]
+          STR d26, [x18], 8
+          DUP d26, v26.d[1]
+          STR d28, [x13], 8
+          DUP d28, v28.d[1]
+          STR d30,  [x7], 8
+          DUP d30, v30.d[1]
+
+9:
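+        # Store remaining 2 columns if bit 1 of nc is set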
+        TBZ x1, 1, 10f
+        $if INC:
+          STR s30,  [x7], 4
+          DUP s30, v30.s[1]
+          STR s28, [x13], 4
+          DUP s28, v28.s[1]
+          STR s26, [x18], 4
+          DUP s26, v26.s[1]
+          STR s24, [x17], 4
+          DUP s24, v24.s[1]
+          STR s22, [x16], 4
+          DUP s22, v22.s[1]
+          STR s20,  [x6], 4
+          DUP s20, v20.s[1]
+        $else:
+          STR s20,  [x6], 4
+          DUP s20, v20.s[1]
+          STR s22, [x16], 4
+          DUP s22, v22.s[1]
+          STR s24, [x17], 4
+          DUP s24, v24.s[1]
+          STR s26, [x18], 4
+          DUP s26, v26.s[1]
+          STR s28, [x13], 4
+          DUP s28, v28.s[1]
+          STR s30,  [x7], 4
+          DUP s30, v30.s[1]
+
+10:
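+        # Store the last column if bit 0 of nc is set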
+        TBZ x1, 0, 11f
+        $if INC:
+          STR h30,  [x7]
+          STR h28, [x13]
+          STR h26, [x18]
+          STR h24, [x17]
+          STR h22, [x16]
+          STR h20,  [x6]
+        $else:
+          STR h20,  [x6]
+          STR h22, [x16]
+          STR h24, [x17]
+          STR h26, [x18]
+          STR h28, [x13]
+          STR h30,  [x7]
+11:
+        RET
+
+END_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x8__aarch64_neonfp16arith_ld64
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif