// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMN"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

// Template for the f32 GEMM (and GEMMINC, when INC is set) micro-kernel:
// computes an ${MR}x${NR} tile of C = A * W, using NEON (or NEON-FMA when FMA
// is set) with 64-bit ("ld64") loads of A, i.e. 2 floats of A per main-loop
// iteration.  Expanded by xngen with parameters MR, NR, INC, FMA.
//
// Arguments of the generated kernel:
//   mr        - number of live rows of A/C (1 <= mr <= MR)
//   nc        - number of output columns remaining
//   kc        - reduction length in BYTES (multiple of sizeof(float))
//   a/a_stride- input rows and their byte stride
//   w         - packed weights: per NR-wide column group, NR bias floats
//               followed by interleaved filter values (read sequentially)
//   c/cm_stride/cn_stride - output pointer and row/column-group byte strides
//   acc       - (INC variant only) external accumulator buffer to start from
//   params    - scalar min/max clamping parameters
void xnn_f32_gemm${"inc" if INC else ""}_ukernel_${MR}x${NR}__${"neonfma" if FMA else "neon"}_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const float*restrict acc,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);  // kc is counted in bytes, whole floats only
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

  // Set up one A/C pointer pair per row of the tile.  When mr < MR, the
  // surplus rows alias the previous row, so loads/stores stay in bounds and
  // the duplicate row's results are simply overwritten by the real one.
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
    // Initialize accumulators: from the external buffer (GEMMINC) or from the
    // NR bias values at the head of the packed weights (GEMM); rows 1..MR-1
    // start as copies of row 0's bias-initialized vectors.
    $if INC:
      $for M in range(MR):
        $for N in range(0, NR, 4):
          float32x4_t vacc${M}x${ABC[N:N+4]} = vld1q_f32(acc); acc += 4;
    $else:
      $for N in range(0, NR, 4):
        float32x4_t vacc0x${ABC[N:N+4]} = vld1q_f32(w); w += 4;
      $for M in range(1, MR):
        $for N in range(0, NR, 4):
          float32x4_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};

    // Main loop: consume 2 floats of each A row (one 64-bit load) and
    // 2*NR floats of W per iteration.
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      $for M in range(MR):
        const float32x2_t va${M} = vld1_f32(a${M}); a${M} += 2;

      $for L in range(2):
        $for N in range(0, NR, 4):
          const float32x4_t vb${ABC[N:N+4]}c${L} = vld1q_f32(w); w += 4;

        $if FMA:
          // AArch64 has vfmaq_lane_f32; on AArch32, broadcasting the lane
          // first and using vfmaq_f32 generates better code.
          #if defined(__aarch64__)
            $for N in range(0, NR, 4):
              $for M in range(MR):
                vacc${M}x${ABC[N:N+4]} = vfmaq_lane_f32(vacc${M}x${ABC[N:N+4]}, vb${ABC[N:N+4]}c${L}, va${M}, ${L});
          #else
            $for M in range(MR):
              const float32x4_t va${M}c${L} = vdupq_lane_f32(va${M}, ${L});
            $for N in range(0, NR, 4):
              $for M in range(MR):
                vacc${M}x${ABC[N:N+4]} = vfmaq_f32(vacc${M}x${ABC[N:N+4]}, va${M}c${L}, vb${ABC[N:N+4]}c${L});
          #endif
        $else:
          $for N in range(0, NR, 4):
            $for M in range(MR):
              vacc${M}x${ABC[N:N+4]} = vmlaq_lane_f32(vacc${M}x${ABC[N:N+4]}, vb${ABC[N:N+4]}c${L}, va${M}, ${L});
    }
    // Remainder: kc was odd (in floats), process the final single element.
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const float32x4_t va${M} = vld1q_dup_f32(a${M}); a${M} += 1;

      $for N in range(0, NR, 4):
        const float32x4_t vb${ABC[N:N+4]} = vld1q_f32(w); w += 4;

      $for N in range(0, NR, 4):
        $for M in range(MR):
          $if FMA:
            vacc${M}x${ABC[N:N+4]} = vfmaq_f32(vacc${M}x${ABC[N:N+4]}, va${M}, vb${ABC[N:N+4]});
          $else:
            vacc${M}x${ABC[N:N+4]} = vmlaq_f32(vacc${M}x${ABC[N:N+4]}, va${M}, vb${ABC[N:N+4]});
    }
    // Clamp the accumulators to [params->scalar.min, params->scalar.max].
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    $for N in range(0, NR, 4):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+4]} = vminq_f32(vacc${M}x${ABC[N:N+4]}, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    $for N in range(0, NR, 4):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+4]} = vmaxq_f32(vacc${M}x${ABC[N:N+4]}, vmin);

    if XNN_LIKELY(nc >= ${NR}) {
      // Full tile: store all NR columns of every row, advance C by cn_stride,
      // and rewind the A pointers by kc bytes for the next column group.
      $for M in reversed(range(MR)):
        vst1q_f32(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          vst1q_f32(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      $for M in reversed(range(MR)):
        a${M} = (const float*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};

    } else {
      // Partial tile: decompose nc into powers of two.  For each set bit,
      // store that many columns and shift the surviving lanes down so the
      // next (smaller) store reads from lane 0.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for N in range(0, 1 << LOG2N, 4):
                $for M in reversed(range(MR)):
                  vst1q_f32(c${M}, vacc${M}x${ABC[N:N+4]}); c${M} += 4;

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                vst1_f32(c${M}, vacc${M}x${ABC[0:2]}); c${M} += 2;

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:2]} = vget_high_f32(vacc${M}x${ABC[0:4]});
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                vst1_lane_f32(c${M}, vacc${M}x${ABC[0:2]}, 0);
          }
        $if LOG2N == 2:
          // Narrow to 64-bit vectors once at most 3 columns remain.
          $for M in reversed(range(MR)):
            float32x2_t vacc${M}x${ABC[0:2]} = vget_low_f32(vacc${M}x${ABC[0:4]});

      nc = 0;
    }
  } while (nc != 0);
}