| // Copyright 2020 Google LLC |
| // |
| // This source code is licensed under the BSD-style license found in the |
| // LICENSE file in the root directory of this source tree. |
| |
| #include <xnnpack/assembly.h> |
| |
| # void xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32( |
| # size_t mr, x0 |
| # size_t nc, x1 |
| # size_t kc, x2 / x0 |
| # const uint8_t*restrict a, x3 |
| # size_t a_stride, x4 |
| # const void*restrict w, x5 |
| # uint8_t*restrict c, x6 |
| # size_t cm_stride, x7 |
| # size_t cn_stride, [sp] -> (x0) |
| $if INC: |
| # const float*restrict acc, [sp + 8] -> x15 |
| # const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 16] -> x8 |
| $else: |
| # const union xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) [sp + 8] -> x8 |
| |
| # d8-d15, x19-x30 need to be preserved if used. x18 is reserved by the OS. |
| |
| # A pointers |
| # x3 a0 |
| # x9 a1 |
| # x10 a2 |
| # x11 a3 |
| # x12 a4 |
| # x4 a5 |
| |
| # C pointers |
| # x6 c0 |
| # x16 c1 |
| # x17 c2 |
| # x14 c3 |
| # x13 c4 |
| # x7 c5 |
| |
| # Vector register usage |
| # A0 v0 |
| # A1 v1 |
| # A2 v2 |
| # A3 v3 |
| # A4 v4 |
| # A5 v5 |
| # B v16 v17 v18 v19 |
| # C v20 v21 |
| # C v22 v23 |
| # C v24 v25 |
| # C v26 v27 |
| # C v28 v29 |
| # C v30 v31 |
# Scale v6
# Clamp (v4), (v5)
| # unused A v8 v9 v10 v11 |
| # unused B v12 v13 v14 v15 |
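#
# Reference computation (informal sketch, not executed; kc is in bytes, so each
# A row holds kc/2 halffloats):
#   acc[m][n]  = INC ? acc_in[m][n] : bias[n]        (bias = first 16 halffloats of w)
#   acc[m][n] += sum_k a[m][k] * w[k][n]             (m = 0..5, n = 0..15)
#   c[m][n]    = min(max(acc[m][n] * scale, min), max)
# where scale, min and max come from params (held in v6, v4, v5).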
| |
| |
| BEGIN_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32 |
| |
| $if INC: |
| # Load acc, params pointer |
| LDP x15, x8, [sp, 8] |
| $else: |
| # Load params pointer |
| LDR x8, [sp, 8] |
| |
| # Clamp A and C pointers |
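# When mr < 6 the CSELs below alias the out-of-range rows to the previous row's
# pointers, so the kernel can unconditionally process 6 rows: the duplicate rows
# simply recompute and overwrite the last valid row, keeping all loads and
# stores in bounds.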
| CMP x0, 2 // if mr < 2 |
| ADD x9, x3, x4 // a1 = a0 + a_stride |
| ADD x16, x6, x7 // c1 = c0 + cm_stride |
| CSEL x9, x3, x9, LO // a1 = a0 |
| CSEL x16, x6, x16, LO // c1 = c0 |
| |
| ADD x10, x9, x4 // a2 = a1 + a_stride |
| ADD x17, x16, x7 // c2 = c1 + cm_stride |
| // if mr <= 2 |
| CSEL x10, x9, x10, LS // a2 = a1 |
| CSEL x17, x16, x17, LS // c2 = c1 |
| |
| CMP x0, 4 // if mr < 4 |
| ADD x11, x10, x4 // a3 = a2 + a_stride |
| ADD x14, x17, x7 // c3 = c2 + cm_stride |
| CSEL x11, x10, x11, LO // a3 = a2 |
| CSEL x14, x17, x14, LO // c3 = c2 |
| |
| ADD x12, x11, x4 // a4 = a3 + a_stride |
| ADD x13, x14, x7 // c4 = c3 + cm_stride |
| // if mr <= 4 |
| CSEL x12, x11, x12, LS // a4 = a3 |
| CSEL x13, x14, x13, LS // c4 = c3 |
| |
| CMP x0, 6 // if mr < 6 |
| ADD x4, x12, x4 // a5 = a4 + a_stride |
| ADD x7, x13, x7 // c5 = c4 + cm_stride |
| CSEL x4, x12, x4, LO // a5 = a4 |
| CSEL x7, x13, x7, LO // c5 = c4 |
| |
| # Load params scale value |
| LD1R {v6.8h}, [x8] |
ADD x8, x8, 2 // advance params past the halffloat scale to min/max
| |
| 0: |
| $if INC: |
| # Load initial accumulators |
| LDP q20, q21, [x15], 32 |
| LDP q22, q23, [x15], 32 |
| LDP q24, q25, [x15], 32 |
| LDP q26, q27, [x15], 32 |
| LDP q28, q29, [x15], 32 |
| LDP q30, q31, [x15], 32 |
| $else: |
| # Load initial bias from w into accumulators |
| LDP q20, q21, [x5], 32 |
| MOV v22.16b, v20.16b |
| MOV v23.16b, v21.16b |
| MOV v24.16b, v20.16b |
| MOV v25.16b, v21.16b |
| MOV v26.16b, v20.16b |
| MOV v27.16b, v21.16b |
| MOV v28.16b, v20.16b |
| MOV v29.16b, v21.16b |
| MOV v30.16b, v20.16b |
| MOV v31.16b, v21.16b |
| |
# Are there at least 2 halffloats (4 bytes)?
| SUBS x0, x2, 4 // k = kc - 4 |
| B.LO 3f |
| |
| # Main loop - 2 halffloats of A (4 bytes) |
| # 24 FMA + 6 ld32 A + 4 LDR B |
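# Each iteration loads 2 halffloats per A row (s0-s5) and 64 bytes of B:
# v16:v17 hold the 16 B values for the first k, v18:v19 those for the second k.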
| 1: |
| LDR s0, [x3], 4 |
| LDR q16, [x5], 16 |
| LDR q17, [x5], 16 |
| LDR s1, [x9], 4 |
| LDR s2, [x10], 4 |
| LDR s3, [x11], 4 |
| LDR s4, [x12], 4 |
| LDR s5, [x4], 4 |
SUBS x0, x0, 4 // k -= 4
| FMLA v20.8h, v16.8h, v0.h[0] |
| FMLA v22.8h, v16.8h, v1.h[0] |
| FMLA v24.8h, v16.8h, v2.h[0] |
| FMLA v26.8h, v16.8h, v3.h[0] |
| LDR q18, [x5], 16 |
| LDR q19, [x5], 16 |
| FMLA v28.8h, v16.8h, v4.h[0] |
| FMLA v30.8h, v16.8h, v5.h[0] |
| FMLA v21.8h, v17.8h, v0.h[0] |
| FMLA v23.8h, v17.8h, v1.h[0] |
| FMLA v25.8h, v17.8h, v2.h[0] |
| FMLA v27.8h, v17.8h, v3.h[0] |
| FMLA v29.8h, v17.8h, v4.h[0] |
| FMLA v31.8h, v17.8h, v5.h[0] |
| |
| FMLA v20.8h, v18.8h, v0.h[1] |
| FMLA v22.8h, v18.8h, v1.h[1] |
| FMLA v24.8h, v18.8h, v2.h[1] |
| FMLA v26.8h, v18.8h, v3.h[1] |
| FMLA v28.8h, v18.8h, v4.h[1] |
| FMLA v30.8h, v18.8h, v5.h[1] |
| FMLA v21.8h, v19.8h, v0.h[1] |
| FMLA v23.8h, v19.8h, v1.h[1] |
| FMLA v25.8h, v19.8h, v2.h[1] |
| FMLA v27.8h, v19.8h, v3.h[1] |
| FMLA v29.8h, v19.8h, v4.h[1] |
| FMLA v31.8h, v19.8h, v5.h[1] |
| B.HS 1b |
| |
# Is there a remainder? (1 halffloat of A, 2 bytes)
| TBNZ x0, 1, 3f |
| 2: |
| # Scale and Clamp |
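# Multiply every accumulator by the scale in v6, then clamp against the min (v4)
# and max (v5) loaded from params below.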
| FMUL v20.8h, v20.8h, v6.8h |
# Load params min/max values
| LD2R {v4.8h, v5.8h}, [x8] |
| FMUL v21.8h, v21.8h, v6.8h |
| FMUL v22.8h, v22.8h, v6.8h |
| FMUL v23.8h, v23.8h, v6.8h |
| FMUL v24.8h, v24.8h, v6.8h |
| FMUL v25.8h, v25.8h, v6.8h |
| FMUL v26.8h, v26.8h, v6.8h |
| FMUL v27.8h, v27.8h, v6.8h |
| FMUL v28.8h, v28.8h, v6.8h |
| FMUL v29.8h, v29.8h, v6.8h |
| FMUL v30.8h, v30.8h, v6.8h |
| FMUL v31.8h, v31.8h, v6.8h |
| # Load cn_stride |
| LDR x0, [sp, 0] |
| FMAX v20.8h, v20.8h, v4.8h |
| FMAX v21.8h, v21.8h, v4.8h |
| FMAX v22.8h, v22.8h, v4.8h |
| FMAX v23.8h, v23.8h, v4.8h |
| FMAX v24.8h, v24.8h, v4.8h |
| FMAX v25.8h, v25.8h, v4.8h |
| FMAX v26.8h, v26.8h, v4.8h |
| FMAX v27.8h, v27.8h, v4.8h |
| FMAX v28.8h, v28.8h, v4.8h |
| FMAX v29.8h, v29.8h, v4.8h |
| FMAX v30.8h, v30.8h, v4.8h |
| FMAX v31.8h, v31.8h, v4.8h |
SUBS x1, x1, 16 // nc -= 16
| FMIN v20.8h, v20.8h, v5.8h |
| FMIN v21.8h, v21.8h, v5.8h |
| FMIN v22.8h, v22.8h, v5.8h |
| FMIN v23.8h, v23.8h, v5.8h |
| FMIN v24.8h, v24.8h, v5.8h |
| FMIN v25.8h, v25.8h, v5.8h |
| FMIN v26.8h, v26.8h, v5.8h |
| FMIN v27.8h, v27.8h, v5.8h |
| FMIN v28.8h, v28.8h, v5.8h |
| FMIN v29.8h, v29.8h, v5.8h |
| FMIN v30.8h, v30.8h, v5.8h |
| FMIN v31.8h, v31.8h, v5.8h |
| |
| # Store full 6 x 16 |
| B.LO 4f |
| |
| $if INC: |
| ST1 {v30.16b, v31.16b}, [x7], x0 |
| SUB x3, x3, x2 // a0 -= kc |
| ST1 {v28.16b, v29.16b}, [x13], x0 |
| SUB x9, x9, x2 // a1 -= kc |
| ST1 {v26.16b, v27.16b}, [x14], x0 |
| SUB x10, x10, x2 // a2 -= kc |
| ST1 {v24.16b, v25.16b}, [x17], x0 |
| SUB x11, x11, x2 // a3 -= kc |
| ST1 {v22.16b, v23.16b}, [x16], x0 |
| SUB x12, x12, x2 // a4 -= kc |
| ST1 {v20.16b, v21.16b}, [x6], x0 |
| SUB x4, x4, x2 // a5 -= kc |
| $else: |
| ST1 {v20.16b, v21.16b}, [x6], x0 |
| SUB x3, x3, x2 // a0 -= kc |
| ST1 {v22.16b, v23.16b}, [x16], x0 |
| SUB x9, x9, x2 // a1 -= kc |
| ST1 {v24.16b, v25.16b}, [x17], x0 |
| SUB x10, x10, x2 // a2 -= kc |
| ST1 {v26.16b, v27.16b}, [x14], x0 |
| SUB x11, x11, x2 // a3 -= kc |
| ST1 {v28.16b, v29.16b}, [x13], x0 |
| SUB x12, x12, x2 // a4 -= kc |
| ST1 {v30.16b, v31.16b}, [x7], x0 |
| SUB x4, x4, x2 // a5 -= kc |
| |
B.HI 0b // continue if nc > 0
| RET |
| |
| 3: |
# Remainder: 1 halffloat of A (2 bytes)
| LDR h0, [x3], 2 |
| LDR q16, [x5], 16 |
| LDR q17, [x5], 16 |
| LDR h1, [x9], 2 |
| LDR h2, [x10], 2 |
| LDR h3, [x11], 2 |
| LDR h4, [x12], 2 |
| LDR h5, [x4], 2 |
| FMLA v20.8h, v16.8h, v0.h[0] |
| FMLA v22.8h, v16.8h, v1.h[0] |
| FMLA v24.8h, v16.8h, v2.h[0] |
| FMLA v26.8h, v16.8h, v3.h[0] |
| FMLA v28.8h, v16.8h, v4.h[0] |
| FMLA v30.8h, v16.8h, v5.h[0] |
| FMLA v21.8h, v17.8h, v0.h[0] |
| FMLA v23.8h, v17.8h, v1.h[0] |
| FMLA v25.8h, v17.8h, v2.h[0] |
| FMLA v27.8h, v17.8h, v3.h[0] |
| FMLA v29.8h, v17.8h, v4.h[0] |
| FMLA v31.8h, v17.8h, v5.h[0] |
| B 2b |
| |
| # Store odd width |
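# Fewer than 16 columns remain: test the low bits of the remaining column count
# in x1 and store 8, 4, 2, then 1 halffloat(s); after each partial store the
# remaining halffloats are shifted down into the low lanes for the next test.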
| 4: |
| TBZ x1, 3, 5f |
| $if INC: |
| STR q30, [x7], 16 |
| MOV v30.16b, v31.16b |
| STR q28, [x13], 16 |
| MOV v28.16b, v29.16b |
| STR q26, [x14], 16 |
| MOV v26.16b, v27.16b |
| STR q24, [x17], 16 |
| MOV v24.16b, v25.16b |
| STR q22, [x16], 16 |
| MOV v22.16b, v23.16b |
| STR q20, [x6], 16 |
| MOV v20.16b, v21.16b |
| $else: |
| STR q20, [x6], 16 |
| MOV v20.16b, v21.16b |
| STR q22, [x16], 16 |
| MOV v22.16b, v23.16b |
| STR q24, [x17], 16 |
| MOV v24.16b, v25.16b |
| STR q26, [x14], 16 |
| MOV v26.16b, v27.16b |
| STR q28, [x13], 16 |
| MOV v28.16b, v29.16b |
| STR q30, [x7], 16 |
| MOV v30.16b, v31.16b |
| |
| 5: |
| TBZ x1, 2, 6f |
| $if INC: |
| STR d30, [x7], 8 |
| DUP d30, v30.d[1] |
| STR d28, [x13], 8 |
| DUP d28, v28.d[1] |
| STR d26, [x14], 8 |
| DUP d26, v26.d[1] |
| STR d24, [x17], 8 |
| DUP d24, v24.d[1] |
| STR d22, [x16], 8 |
| DUP d22, v22.d[1] |
| STR d20, [x6], 8 |
| DUP d20, v20.d[1] |
| $else: |
| STR d20, [x6], 8 |
| DUP d20, v20.d[1] |
| STR d22, [x16], 8 |
| DUP d22, v22.d[1] |
| STR d24, [x17], 8 |
| DUP d24, v24.d[1] |
| STR d26, [x14], 8 |
| DUP d26, v26.d[1] |
| STR d28, [x13], 8 |
| DUP d28, v28.d[1] |
| STR d30, [x7], 8 |
| DUP d30, v30.d[1] |
| |
| 6: |
| TBZ x1, 1, 7f |
| $if INC: |
| STR s30, [x7], 4 |
| DUP s30, v30.s[1] |
| STR s28, [x13], 4 |
| DUP s28, v28.s[1] |
| STR s26, [x14], 4 |
| DUP s26, v26.s[1] |
| STR s24, [x17], 4 |
| DUP s24, v24.s[1] |
| STR s22, [x16], 4 |
| DUP s22, v22.s[1] |
| STR s20, [x6], 4 |
| DUP s20, v20.s[1] |
| $else: |
| STR s20, [x6], 4 |
| DUP s20, v20.s[1] |
| STR s22, [x16], 4 |
| DUP s22, v22.s[1] |
| STR s24, [x17], 4 |
| DUP s24, v24.s[1] |
| STR s26, [x14], 4 |
| DUP s26, v26.s[1] |
| STR s28, [x13], 4 |
| DUP s28, v28.s[1] |
| STR s30, [x7], 4 |
| DUP s30, v30.s[1] |
| |
| 7: |
| TBZ x1, 0, 8f |
| $if INC: |
| STR h30, [x7] |
| STR h28, [x13] |
| STR h26, [x14] |
| STR h24, [x17] |
| STR h22, [x16] |
| STR h20, [x6] |
| $else: |
| STR h20, [x6] |
| STR h22, [x16] |
| STR h24, [x17] |
| STR h26, [x14] |
| STR h28, [x13] |
| STR h30, [x7] |
| 8: |
| RET |
| |
| END_FUNCTION xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_6x16__aarch64_neonfp16arith_ld32 |
| |
| #ifdef __ELF__ |
| .section ".note.GNU-stack","",%progbits |
| #endif |