// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-shuffle4.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_ukernel_5x16s4__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

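  // Set up one A (input) row pointer and one C (output) row pointer per row of
  // the 5-row micro-tile. When mr < 5, the out-of-range pointers alias the last
  // valid row: those rows are computed redundantly with identical data, and
  // because rows are stored in descending order below, every store to the
  // aliased address writes the same values.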
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  do {
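    // This is the accumulating (gemminc) variant: instead of starting from a
    // bias, the 5x16 tile of accumulators is seeded from partial results in
    // `acc`, laid out as 80 contiguous floats (5 rows x 16 columns).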
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
    __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
    __m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
    __m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
    acc += 80;

    size_t k = kc;
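    // Main loop: the s4 shuffle scheme. Each iteration loads 4 consecutive A
    // values per row, broadcast to both 128-bit halves of a YMM register, and
    // runs 4 rounds of FMAs against pre-shuffled 16-column weight vectors,
    // rotating the A lanes between rounds (64 weights consumed per iteration).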
    while (k >= 4 * sizeof(float)) {
      __m256 va0 = _mm256_broadcast_ps((const __m128*) a0);
      a0 += 4;
      __m256 va1 = _mm256_broadcast_ps((const __m128*) a1);
      a1 += 4;
      __m256 va2 = _mm256_broadcast_ps((const __m128*) a2);
      a2 += 4;
      __m256 va3 = _mm256_broadcast_ps((const __m128*) a3);
      a3 += 4;
      __m256 va4 = _mm256_broadcast_ps((const __m128*) a4);
      a4 += 4;

      const __m256 vb01234567c0 = _mm256_load_ps(w + 0);
      const __m256 vb89ABCDEFc0 = _mm256_load_ps(w + 8);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c0, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c0, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c0, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c0, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c0, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc0, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc0, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc0, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc0, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc0, vacc4x89ABCDEF);

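      // Rotate the four lanes of each A register by one position so the next
      // FMA round pairs each column's weights with the next A element; the
      // weights were shuffled to this order at packing time.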
      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c1 = _mm256_load_ps(w + 16);
      const __m256 vb89ABCDEFc1 = _mm256_load_ps(w + 24);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c1, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c1, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c1, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c1, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c1, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc1, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc1, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc1, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc1, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc1, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c2 = _mm256_load_ps(w + 32);
      const __m256 vb89ABCDEFc2 = _mm256_load_ps(w + 40);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c2, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c2, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c2, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c2, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c2, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc2, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc2, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc2, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc2, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc2, vacc4x89ABCDEF);

      va0 = _mm256_permute_ps(va0, _MM_SHUFFLE(0, 3, 2, 1));
      va1 = _mm256_permute_ps(va1, _MM_SHUFFLE(0, 3, 2, 1));
      va2 = _mm256_permute_ps(va2, _MM_SHUFFLE(0, 3, 2, 1));
      va3 = _mm256_permute_ps(va3, _MM_SHUFFLE(0, 3, 2, 1));
      va4 = _mm256_permute_ps(va4, _MM_SHUFFLE(0, 3, 2, 1));

      const __m256 vb01234567c3 = _mm256_load_ps(w + 48);
      const __m256 vb89ABCDEFc3 = _mm256_load_ps(w + 56);

      vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567c3, vacc0x01234567);
      vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567c3, vacc1x01234567);
      vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567c3, vacc2x01234567);
      vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567c3, vacc3x01234567);
      vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567c3, vacc4x01234567);
      vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEFc3, vacc0x89ABCDEF);
      vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEFc3, vacc1x89ABCDEF);
      vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEFc3, vacc2x89ABCDEF);
      vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEFc3, vacc3x89ABCDEF);
      vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEFc3, vacc4x89ABCDEF);

      w += 64;
      k -= 4 * sizeof(float);
    }
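    // Remainder loop: process the kc % 4 leftover elements one k step at a
    // time, broadcasting a single A value per row across all 16 columns.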
    if XNN_UNLIKELY(k != 0) {
      do {
        const __m256 va0 = _mm256_broadcast_ss(a0);
        a0 += 1;
        const __m256 va1 = _mm256_broadcast_ss(a1);
        a1 += 1;
        const __m256 va2 = _mm256_broadcast_ss(a2);
        a2 += 1;
        const __m256 va3 = _mm256_broadcast_ss(a3);
        a3 += 1;
        const __m256 va4 = _mm256_broadcast_ss(a4);
        a4 += 1;

        const __m256 vb01234567 = _mm256_load_ps(w);
        const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
        w += 16;

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
        vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
        vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
        vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
        vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
        vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);

        k -= sizeof(float);
      } while (k != 0);
    }

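    // Clamp all accumulators to the [min, max] output range, broadcast from
    // the SSE layout of the minmax params.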
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);

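    // Store the 5x16 tile. On the common path (at least 16 columns remain),
    // rows are stored highest-first, the C pointers advance by cn_stride, and
    // the A pointers are rewound by kc for the next tile of output columns.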
    if XNN_LIKELY(nc >= 16) {
      _mm256_storeu_ps(c4, vacc4x01234567);
      _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
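      // Partial-tile store: peel off 8, 4, 2, and 1 columns according to the
      // bits of nc, narrowing from 256-bit to 128-bit registers along the way.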
      if (nc & 8) {
        _mm256_storeu_ps(c4, vacc4x01234567);
        _mm256_storeu_ps(c3, vacc3x01234567);
        _mm256_storeu_ps(c2, vacc2x01234567);
        _mm256_storeu_ps(c1, vacc1x01234567);
        _mm256_storeu_ps(c0, vacc0x01234567);

        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;

        c4 += 8;
        c3 += 8;
        c2 += 8;
        c1 += 8;
        c0 += 8;
      }
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}