QS8 4x16c4-aarch64-neondot-ld64 IGEMM microkernel

PiperOrigin-RevId: 362158244
diff --git a/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S b/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S
new file mode 100644
index 0000000..d6a9fbf
--- /dev/null
+++ b/src/qs8-igemm/4x16c4-aarch64-neondot-ld64.S
@@ -0,0 +1,375 @@
+// Copyright 2021 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <xnnpack/assembly.h>
+
+# void xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64(
+#     size_t mr,                 x0
+#     size_t nc,                 x1
+#     size_t kc,                 x2 / x0
+#     size_t ks,                 x3 / x9
+#     const int8_t**restrict a,  x4
+#     const int8_t* restrict w,  x5
+#     int8_t* restrict c,        x6
+#     size_t cm_stride,          x7
+#     size_t cn_stride,                  [sp] -> x10
+#     size_t a_offset,                   [sp + 8] -> x11
+#     const int8_t* zero,                [sp + 16] -> x12
+#     const union xnn_qs8_gemm_params params [sp + 24] -> x8)
+
+# d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS.
+
+# Register usage
+# A0  x20  v0
+# A1  x15  v1
+# A2  x13  v2
+# A3  x21  v3
+# B    x5  v4  v5  v6  v7
+# C0   x6 v16 v20 v24 v28
+# C1  x16 v17 v21 v25 v29
+# C2  x17 v18 v22 v26 v30
+# C3   x7 v19 v23 v27 v31
+# unused v8 v9 v10 v11 v12 v13 v14 v15
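+
+# Naming: 4x16c4 = a 4-row (MR) by 16-column (NR) output tile, with each SDOT lane
+# consuming a group of 4 int8 values of A; ld64 = the main loop loads 64 bits
+# (8 bytes) of each A row per iteration. Each C row lives in four q-registers of
+# 4 int32 accumulators covering columns 0-3, 4-7, 8-11 and 12-15.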
+
+BEGIN_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64
+
+        # Clamp C pointers
+        CMP      x0, 2                // if mr < 2
+        LDP     x10, x11, [sp]        // Load cn_stride, a_offset
+        ADD     x16, x6, x7           // c1 = c0 + cm_stride
+        CSEL    x16, x6,  x16, LO     //   c1 = c0
+        ADD      x2, x2, 3            // kc = (kc + 3) & ~3
+
+        ADD     x17, x16, x7          // c2 = c1 + cm_stride
+        LDP     x12, x8, [sp, 16]     // Load zero, params pointer
+                                      // if mr <= 2
+        CSEL    x17, x16, x17, LS     //   c2 = c1
+        BIC      x2, x2, 3
+
+        CMP      x0, 4                // if mr < 4
+        STP     x20, x21, [sp, -16]!  // Save x20-x21 on stack
+        ADD      x7,  x17, x7         // c3 = c2 + cm_stride
+        CSEL     x7,  x17, x7, LO     //   c3 = c2
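+
+        # In C terms, the setup above is roughly:
+        #   c1 = (mr < 2)  ? c0 : c0 + cm_stride;
+        #   c2 = (mr <= 2) ? c1 : c1 + cm_stride;
+        #   c3 = (mr < 4)  ? c2 : c2 + cm_stride;
+        #   kc = (kc + 3) & ~3;  // round kc up to a multiple of 4 (c4 layout)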
+
+        .p2align 3
+0:
+        # Load initial bias from w into accumulators
+        LDP     q16, q20, [x5], 32
+        MOV     v17.16b, v16.16b
+        MOV     v18.16b, v16.16b
+        LDP     q24, q28, [x5], 32
+        MOV     v19.16b, v16.16b
+        MOV     v21.16b, v20.16b
+        MOV     v22.16b, v20.16b
+        MOV     v23.16b, v20.16b
+        MOV     v25.16b, v24.16b
+        MOV     v26.16b, v24.16b
+        MOV     v27.16b, v24.16b
+        MOV     v29.16b, v28.16b
+        MOV     v30.16b, v28.16b
+        MOV     v31.16b, v28.16b
+        MOV     x9, x3  // p = ks
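+
+        # All four rows start from the same per-channel bias: v16/v20/v24/v28 hold
+        # the bias for columns 0-3/4-7/8-11/12-15, and rows 1-3 copy row 0.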
+
+        .p2align 3
+1:
+        # Load next 4 A pointers
+        LDP     x20, x15, [x4], 16
+        LDP     x13, x21, [x4], 16
+
+        CMP     x20, x12           // if a0 == zero
+        ADD     x20, x20, x11      // a0 += a_offset
+        CSEL    x20, x12, x20, EQ  //   a0 = zero, else a0 += a_offset
+        CMP     x15, x12           // if a1 == zero
+        ADD     x15, x15, x11      // a1 += a_offset
+        CSEL    x15, x12, x15, EQ  //   a1 = zero, else a1 += a_offset
+        CMP     x13, x12           // if a2 == zero
+        ADD     x13, x13, x11      // a2 += a_offset
+        CSEL    x13, x12, x13, EQ  //   a2 = zero, else a2 += a_offset
+        CMP     x21, x12           // if a3 == zero
+        ADD     x21, x21, x11      // a3 += a_offset
+        CSEL    x21, x12, x21, EQ  //   a3 = zero, else a3 += a_offset
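+
+        # Net effect per row: aN = (aN == zero) ? zero : aN + a_offset.
+        # Rows that point at the zero (padding) buffer must not have a_offset applied.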
+
+        # Is there at least 8 bytes for the main loop?
+        SUBS    x0, x2, 8          // k = kc - 8
+        B.LO    4f
+
+        # Main loop - 8 bytes of A
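+        # Each iteration consumes 8 bytes of every A row and 128 bytes of packed
+        # weights (8 q-register loads), issuing 32 SDOTs: one per accumulator for
+        # each of the two 4-byte groups of A.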
+        .p2align 3
+2:
+        LDR     d0, [x20], 8
+        LDR     q4,  [x5], 16
+        LDR     d1, [x15], 8
+        LDR     d2, [x13], 8
+        LDR     d3, [x21], 8
+        LDR     q5,  [x5], 16
+        SDOT    v16.4s, v4.16b,  v0.4b[0]
+        SDOT    v17.4s, v4.16b,  v1.4b[0]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[0]
+        SDOT    v19.4s, v4.16b,  v3.4b[0]
+        SDOT    v20.4s, v5.16b,  v0.4b[0]
+        SDOT    v21.4s, v5.16b,  v1.4b[0]
+        SDOT    v22.4s, v5.16b,  v2.4b[0]
+        SDOT    v23.4s, v5.16b,  v3.4b[0]
+        SDOT    v24.4s, v6.16b, v0.4b[0]
+        SDOT    v25.4s, v6.16b, v1.4b[0]
+        LDP     q4, q5, [x5], 32
+        SDOT    v26.4s, v6.16b, v2.4b[0]
+        SDOT    v27.4s, v6.16b, v3.4b[0]
+        SDOT    v28.4s, v7.16b, v0.4b[0]
+        SDOT    v29.4s, v7.16b, v1.4b[0]
+        SDOT    v30.4s, v7.16b, v2.4b[0]
+        SDOT    v31.4s, v7.16b, v3.4b[0]
+        SDOT    v16.4s, v4.16b,  v0.4b[1]
+        SDOT    v17.4s, v4.16b,  v1.4b[1]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[1]
+        SDOT    v19.4s, v4.16b,  v3.4b[1]
+        SDOT    v20.4s, v5.16b,  v0.4b[1]
+        SDOT    v21.4s, v5.16b,  v1.4b[1]
+        SDOT    v22.4s, v5.16b,  v2.4b[1]
+        SDOT    v23.4s, v5.16b,  v3.4b[1]
+        SDOT    v24.4s, v6.16b,  v0.4b[1]
+        SDOT    v25.4s, v6.16b,  v1.4b[1]
+        SDOT    v26.4s, v6.16b,  v2.4b[1]
+        SDOT    v27.4s, v6.16b,  v3.4b[1]
+        SDOT    v28.4s, v7.16b,  v0.4b[1]
+        SDOT    v29.4s, v7.16b,  v1.4b[1]
+        SDOT    v30.4s, v7.16b,  v2.4b[1]
+        SUBS    x0, x0, 8
+        SDOT    v31.4s, v7.16b,  v3.4b[1]
+        B.HS    2b
+
+        # Is there a remainder? - 4 bytes of A
+        TBNZ    x0, 2, 4f
+
+3:
+        # ks loop
+        SUBS    x9, x9, 32  // ks -= MR * sizeof(int8_t*)
+        B.HI    1b
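+
+        # The weight pointer x5 keeps advancing across the ks loop, so the packed
+        # weights are laid out as the 64-byte bias followed by one (rounded) kc x 16
+        # block of int8 per ks iteration.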
+
+        # Apply params - scale, shift, bias and clamp
+        LD2R    {v0.4s, v1.4s}, [x8], 8  // v0 = multiplier, v1 = right shift (stored negative)
+        CMEQ    v2.4s, v1.4s, 0          // v2 = mask of lanes where the shift is 0
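+
+        # Requantization sketch (per 32-bit accumulator), gemmlowp-style Q31
+        # multiplier plus rounding right shift:
+        #   prod = sqrdmulh(acc, multiplier)          // Q31 fixed-point multiply
+        #   prod += (acc & ~(shift == 0 mask)) >> 31  // minus 1 if acc < 0 and shift != 0
+        #   out  = rounding_shift_right(prod, shift)  // SRSHL by the negative shift
+        # The BIC/SSRA adjustment makes the rounding shift round ties away from zero
+        # rather than toward +infinity for negative accumulators.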
+
+        BIC     v4.16b, v16.16b, v2.16b
+        BIC     v5.16b, v17.16b, v2.16b
+        BIC     v6.16b, v18.16b, v2.16b
+        BIC     v7.16b, v19.16b, v2.16b
+
+        SQRDMULH  v16.4s, v16.4s, v0.4s
+        SQRDMULH  v17.4s, v17.4s, v0.4s
+        SQRDMULH  v18.4s, v18.4s, v0.4s
+        SQRDMULH  v19.4s, v19.4s, v0.4s
+
+        SSRA    v16.4s, v4.4s, 31  // signed shift right accumulate
+        SSRA    v17.4s, v5.4s, 31
+        SSRA    v18.4s, v6.4s, 31
+        SSRA    v19.4s, v7.4s, 31
+
+        BIC     v4.16b, v20.16b, v2.16b
+        BIC     v5.16b, v21.16b, v2.16b
+        BIC     v6.16b, v22.16b, v2.16b
+        BIC     v7.16b, v23.16b, v2.16b
+
+        SQRDMULH  v20.4s, v20.4s, v0.4s
+        SQRDMULH  v21.4s, v21.4s, v0.4s
+        SQRDMULH  v22.4s, v22.4s, v0.4s
+        SQRDMULH  v23.4s, v23.4s, v0.4s
+
+        SSRA    v20.4s, v4.4s, 31
+        SSRA    v21.4s, v5.4s, 31
+        SSRA    v22.4s, v6.4s, 31
+        SSRA    v23.4s, v7.4s, 31
+
+        BIC     v4.16b, v24.16b, v2.16b
+        BIC     v5.16b, v25.16b, v2.16b
+        BIC     v6.16b, v26.16b, v2.16b
+        BIC     v7.16b, v27.16b, v2.16b
+
+        SQRDMULH  v24.4s, v24.4s, v0.4s
+        SQRDMULH  v25.4s, v25.4s, v0.4s
+        SQRDMULH  v26.4s, v26.4s, v0.4s
+        SQRDMULH  v27.4s, v27.4s, v0.4s
+
+        SSRA    v24.4s, v4.4s, 31
+        SSRA    v25.4s, v5.4s, 31
+        SSRA    v26.4s, v6.4s, 31
+        SSRA    v27.4s, v7.4s, 31
+
+        BIC     v4.16b, v28.16b, v2.16b
+        BIC     v5.16b, v29.16b, v2.16b
+        BIC     v6.16b, v30.16b, v2.16b
+        BIC     v7.16b, v31.16b, v2.16b
+
+        SQRDMULH  v28.4s, v28.4s, v0.4s
+        SQRDMULH  v29.4s, v29.4s, v0.4s
+        SQRDMULH  v30.4s, v30.4s, v0.4s
+        SQRDMULH  v31.4s, v31.4s, v0.4s
+
+        SSRA    v28.4s, v4.4s, 31
+        SSRA    v29.4s, v5.4s, 31
+        SSRA    v30.4s, v6.4s, 31
+        SSRA    v31.4s, v7.4s, 31
+
+        SRSHL   v16.4s, v16.4s, v1.4s  // signed rounding shift left
+        SRSHL   v17.4s, v17.4s, v1.4s
+        SRSHL   v18.4s, v18.4s, v1.4s
+        SRSHL   v19.4s, v19.4s, v1.4s
+        SRSHL   v20.4s, v20.4s, v1.4s
+        SRSHL   v21.4s, v21.4s, v1.4s
+        SRSHL   v22.4s, v22.4s, v1.4s
+        SRSHL   v23.4s, v23.4s, v1.4s
+        SRSHL   v24.4s, v24.4s, v1.4s
+        SRSHL   v25.4s, v25.4s, v1.4s
+        SRSHL   v26.4s, v26.4s, v1.4s
+        SRSHL   v27.4s, v27.4s, v1.4s
+        SRSHL   v28.4s, v28.4s, v1.4s
+        SRSHL   v29.4s, v29.4s, v1.4s
+        SRSHL   v30.4s, v30.4s, v1.4s
+        SRSHL   v31.4s, v31.4s, v1.4s
+
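+        # Output conversion: saturating narrow 32 -> 16 bits, add the output
+        # zero_point in 16-bit arithmetic, saturating narrow 16 -> 8 bits, then
+        # clamp to the [output_min, output_max] range loaded from params.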
+        SQXTN   v16.4h, v16.4s
+        SQXTN   v17.4h, v17.4s
+        SQXTN   v18.4h, v18.4s
+        SQXTN   v19.4h, v19.4s
+        SQXTN   v24.4h, v24.4s
+        SQXTN   v25.4h, v25.4s
+        SQXTN   v26.4h, v26.4s
+        SQXTN   v27.4h, v27.4s
+        LD1R    {v2.8h}, [x8], 2   // load output zero_point (bias)
+
+        SQXTN2  v16.8h, v20.4s
+        SQXTN2  v17.8h, v21.4s
+        SQXTN2  v18.8h, v22.4s
+        SQXTN2  v19.8h, v23.4s
+        SQXTN2  v24.8h, v28.4s
+        SQXTN2  v25.8h, v29.4s
+        SQXTN2  v26.8h, v30.4s
+        SQXTN2  v27.8h, v31.4s
+
+        SQADD   v16.8h, v16.8h, v2.8h
+        SQADD   v17.8h, v17.8h, v2.8h
+        SQADD   v18.8h, v18.8h, v2.8h
+        SQADD   v19.8h, v19.8h, v2.8h
+        SQADD   v24.8h, v24.8h, v2.8h
+        SQADD   v25.8h, v25.8h, v2.8h
+        SQADD   v26.8h, v26.8h, v2.8h
+        SQADD   v27.8h, v27.8h, v2.8h
+        LD1R    {v0.16b}, [x8], 1  // clamp min value
+
+        SQXTN    v4.8b, v16.8h
+        SQXTN    v5.8b, v17.8h
+        SQXTN    v6.8b, v18.8h
+        SQXTN    v7.8b, v19.8h
+        LD1R    {v1.16b}, [x8]     // clamp max value
+        SQXTN2   v4.16b, v24.8h
+        SQXTN2   v5.16b, v25.8h
+        SQXTN2   v6.16b, v26.8h
+        SQXTN2   v7.16b, v27.8h
+        SUB      x8, x8, 11       // rewind params pointer
+
+        SMAX    v4.16b, v4.16b, v0.16b
+        SMAX    v5.16b, v5.16b, v0.16b
+        SMAX    v6.16b, v6.16b, v0.16b
+        SMAX    v7.16b, v7.16b, v0.16b
+        SUBS    x1, x1, 16
+        SMIN    v4.16b, v4.16b, v1.16b
+        SMIN    v5.16b, v5.16b, v1.16b
+        SMIN    v6.16b, v6.16b, v1.16b
+        SMIN    v7.16b, v7.16b, v1.16b
+        B.LO    5f
+
+        # Store full 4 x 16
+        ST1     {v7.16b},  [x7], x10
+        ST1     {v6.16b}, [x17], x10
+        ST1     {v5.16b}, [x16], x10
+        ST1     {v4.16b},  [x6], x10
+
+        SUB     x4, x4, x3  // a -= ks
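+        # x4 advanced by 32 bytes per ks iteration, so subtracting ks rewinds it to
+        # the start of the pointer array for the next group of 16 output channels.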
+
+        # nc loop
+        B.HI    0b
+
+        # Restore x20-x21 from stack
+        LDP     x20, x21, [sp], 16
+        RET
+
+        # Remainder - 4 bytes of A
+        .p2align 3
+4:
+        LDR     s0, [x20], 4
+        LDR     q4,  [x5], 16
+        LDR     s1, [x15], 4
+        LDR     s2, [x13], 4
+        LDR     s3, [x21], 4
+        LDR     q5,  [x5], 16
+        SDOT    v16.4s, v4.16b,  v0.4b[0]
+        SDOT    v17.4s, v4.16b,  v1.4b[0]
+        LDP     q6, q7, [x5], 32
+        SDOT    v18.4s, v4.16b,  v2.4b[0]
+        SDOT    v19.4s, v4.16b,  v3.4b[0]
+        SDOT    v20.4s, v5.16b,  v0.4b[0]
+        SDOT    v21.4s, v5.16b,  v1.4b[0]
+        SDOT    v22.4s, v5.16b,  v2.4b[0]
+        SDOT    v23.4s, v5.16b,  v3.4b[0]
+        SDOT    v24.4s, v6.16b, v0.4b[0]
+        SDOT    v25.4s, v6.16b, v1.4b[0]
+        SDOT    v26.4s, v6.16b, v2.4b[0]
+        SDOT    v27.4s, v6.16b, v3.4b[0]
+        SDOT    v28.4s, v7.16b, v0.4b[0]
+        SDOT    v29.4s, v7.16b, v1.4b[0]
+        SDOT    v30.4s, v7.16b, v2.4b[0]
+        SDOT    v31.4s, v7.16b, v3.4b[0]
+        B       3b
+
+        # Store odd width
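+        # Fewer than 16 columns remain. The low bits of x1 still equal the remaining
+        # nc, so store 8/4/2/1 bytes per row based on bits 3/2/1/0, using DUP to move
+        # the next lanes down to element 0 after each partial store.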
+        .p2align 3
+5:
+        TBZ     x1, 3, 6f
+        STR     d7, [x7], 8
+        DUP     d7, v7.d[1]
+        STR     d6, [x17], 8
+        DUP     d6, v6.d[1]
+        STR     d5, [x16], 8
+        DUP     d5, v5.d[1]
+        STR     d4, [x6], 8
+        DUP     d4, v4.d[1]
+6:
+        TBZ     x1, 2, 7f
+        STR     s7, [x7], 4
+        DUP     s7, v7.s[1]
+        STR     s6, [x17], 4
+        DUP     s6, v6.s[1]
+        STR     s5, [x16], 4
+        DUP     s5, v5.s[1]
+        STR     s4, [x6], 4
+        DUP     s4, v4.s[1]
+7:
+        TBZ     x1, 1, 8f
+        ST1     {v7.h}[0], [x7], 2
+        DUP      h7, v7.h[1]
+        ST1     {v6.h}[0], [x17], 2
+        DUP      h6, v6.h[1]
+        ST1     {v5.h}[0], [x16], 2
+        DUP      h5, v5.h[1]
+        ST1     {v4.h}[0], [x6], 2
+        DUP      h4, v4.h[1]
+8:
+        TBZ     x1, 0, 9f
+        ST1     {v7.b}[0], [x7]
+        ST1     {v6.b}[0], [x17]
+        ST1     {v5.b}[0], [x16]
+        ST1     {v4.b}[0], [x6]
+9:
+        # Restore x20-x21 from stack
+        LDP     x20, x21, [sp], 16
+        RET
+
+END_FUNCTION xnn_qs8_igemm_minmax_ukernel_4x16c4__aarch64_neondot_ld64
+
+#ifdef __ELF__
+.section ".note.GNU-stack","",%progbits
+#endif