// Auto-generated file. Do not edit!
// Template: src/f32-dwconv/up-avx512.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>


void xnn_f32_dwconv_ukernel_up16x25__avx512f_acc2(
    size_t channels,
    size_t output_width,
    const float** input,
    const float* weights,
    float* output,
    size_t input_stride,
    size_t output_increment,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(channels != 0);
  assert(output_width != 0);

  const __m512 vmax = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));
  const __m512 vmin = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
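  // vmax/vmin are the output clamp bounds, broadcast from the params struct to all 16 lanes;
  // each accumulator below is clamped to [vmin, vmax] before being stored.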
  do {
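    // Gather the 25 per-tap input row pointers for this output pixel (one pointer per filter
    // tap, e.g. a 5x5 depthwise kernel); the pointer array itself advances by input_stride below.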
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    const float* i4 = input[4];
    const float* i5 = input[5];
    const float* i6 = input[6];
    const float* i7 = input[7];
    const float* i8 = input[8];
    const float* i9 = input[9];
    const float* i10 = input[10];
    const float* i11 = input[11];
    const float* i12 = input[12];
    const float* i13 = input[13];
    const float* i14 = input[14];
    const float* i15 = input[15];
    const float* i16 = input[16];
    const float* i17 = input[17];
    const float* i18 = input[18];
    const float* i19 = input[19];
    const float* i20 = input[20];
    const float* i21 = input[21];
    const float* i22 = input[22];
    const float* i23 = input[23];
    const float* i24 = input[24];
    input = (const float**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const float* w = weights;
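    // Weights are packed per group of 16 channels: 16 bias values followed by 25 taps x 16 filter
    // coefficients (416 floats total), which is why w advances by 416 each iteration. The "_acc2"
    // variant accumulates into two partial sums (p0/p1) that are added together before clamping.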
    for (; c >= 16; c -= 16) {
      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);


      const __m512 vi0x0123456789ABCDEF = _mm512_loadu_ps(i0);
      i0 += 16;

      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_loadu_ps(i1);
      i1 += 16;

      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_loadu_ps(i2);
      i2 += 16;

      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_loadu_ps(i3);
      i3 += 16;

      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_loadu_ps(i4);
      i4 += 16;

      const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_loadu_ps(i5);
      i5 += 16;

      const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_loadu_ps(i6);
      i6 += 16;

      const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_loadu_ps(i7);
      i7 += 16;

      const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_loadu_ps(i8);
      i8 += 16;

      const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi9x0123456789ABCDEF = _mm512_loadu_ps(i9);
      i9 += 16;

      const __m512 vk9x0123456789ABCDEF = _mm512_load_ps(w + 160);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi10x0123456789ABCDEF = _mm512_loadu_ps(i10);
      i10 += 16;

      const __m512 vk10x0123456789ABCDEF = _mm512_load_ps(w + 176);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi11x0123456789ABCDEF = _mm512_loadu_ps(i11);
      i11 += 16;

      const __m512 vk11x0123456789ABCDEF = _mm512_load_ps(w + 192);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi12x0123456789ABCDEF = _mm512_loadu_ps(i12);
      i12 += 16;

      const __m512 vk12x0123456789ABCDEF = _mm512_load_ps(w + 208);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi13x0123456789ABCDEF = _mm512_loadu_ps(i13);
      i13 += 16;

      const __m512 vk13x0123456789ABCDEF = _mm512_load_ps(w + 224);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi14x0123456789ABCDEF = _mm512_loadu_ps(i14);
      i14 += 16;

      const __m512 vk14x0123456789ABCDEF = _mm512_load_ps(w + 240);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi15x0123456789ABCDEF = _mm512_loadu_ps(i15);
      i15 += 16;

      const __m512 vk15x0123456789ABCDEF = _mm512_load_ps(w + 256);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi16x0123456789ABCDEF = _mm512_loadu_ps(i16);
      i16 += 16;

      const __m512 vk16x0123456789ABCDEF = _mm512_load_ps(w + 272);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi17x0123456789ABCDEF = _mm512_loadu_ps(i17);
      i17 += 16;

      const __m512 vk17x0123456789ABCDEF = _mm512_load_ps(w + 288);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi18x0123456789ABCDEF = _mm512_loadu_ps(i18);
      i18 += 16;

      const __m512 vk18x0123456789ABCDEF = _mm512_load_ps(w + 304);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi19x0123456789ABCDEF = _mm512_loadu_ps(i19);
      i19 += 16;

      const __m512 vk19x0123456789ABCDEF = _mm512_load_ps(w + 320);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi20x0123456789ABCDEF = _mm512_loadu_ps(i20);
      i20 += 16;

      const __m512 vk20x0123456789ABCDEF = _mm512_load_ps(w + 336);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi21x0123456789ABCDEF = _mm512_loadu_ps(i21);
      i21 += 16;

      const __m512 vk21x0123456789ABCDEF = _mm512_load_ps(w + 352);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi22x0123456789ABCDEF = _mm512_loadu_ps(i22);
      i22 += 16;

      const __m512 vk22x0123456789ABCDEF = _mm512_load_ps(w + 368);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi23x0123456789ABCDEF = _mm512_loadu_ps(i23);
      i23 += 16;

      const __m512 vk23x0123456789ABCDEF = _mm512_load_ps(w + 384);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi24x0123456789ABCDEF = _mm512_loadu_ps(i24);
      i24 += 16;

      const __m512 vk24x0123456789ABCDEF = _mm512_load_ps(w + 400);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      w += 416;

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

      _mm512_storeu_ps(output, vacc0123456789ABCDEF);
      output += 16;
    }
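    // Handle the remaining channels (fewer than 16) with masked loads and a masked store.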
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1);
      assert(c <= 16);
      // Prepare mask for valid 32-bit elements (depends on c).
      const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << c) - UINT32_C(1)));

      __m512 vacc0123456789ABCDEFp0 = _mm512_load_ps(w);

      const __m512 vi0x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i0);
      const __m512 vk0x0123456789ABCDEF = _mm512_load_ps(w + 16);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi1x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i1);
      const __m512 vk1x0123456789ABCDEF = _mm512_load_ps(w + 32);
      __m512 vacc0123456789ABCDEFp1 = _mm512_mul_ps(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);

      const __m512 vi2x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i2);
      const __m512 vk2x0123456789ABCDEF = _mm512_load_ps(w + 48);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi3x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i3);
      const __m512 vk3x0123456789ABCDEF = _mm512_load_ps(w + 64);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi4x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i4);
      const __m512 vk4x0123456789ABCDEF = _mm512_load_ps(w + 80);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi5x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i5);
      const __m512 vk5x0123456789ABCDEF = _mm512_load_ps(w + 96);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi6x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i6);
      const __m512 vk6x0123456789ABCDEF = _mm512_load_ps(w + 112);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi7x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i7);
      const __m512 vk7x0123456789ABCDEF = _mm512_load_ps(w + 128);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi8x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i8);
      const __m512 vk8x0123456789ABCDEF = _mm512_load_ps(w + 144);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi9x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i9);
      const __m512 vk9x0123456789ABCDEF = _mm512_load_ps(w + 160);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi10x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i10);
      const __m512 vk10x0123456789ABCDEF = _mm512_load_ps(w + 176);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi11x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i11);
      const __m512 vk11x0123456789ABCDEF = _mm512_load_ps(w + 192);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi12x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i12);
      const __m512 vk12x0123456789ABCDEF = _mm512_load_ps(w + 208);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi13x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i13);
      const __m512 vk13x0123456789ABCDEF = _mm512_load_ps(w + 224);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi14x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i14);
      const __m512 vk14x0123456789ABCDEF = _mm512_load_ps(w + 240);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi15x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i15);
      const __m512 vk15x0123456789ABCDEF = _mm512_load_ps(w + 256);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi16x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i16);
      const __m512 vk16x0123456789ABCDEF = _mm512_load_ps(w + 272);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi17x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i17);
      const __m512 vk17x0123456789ABCDEF = _mm512_load_ps(w + 288);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi18x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i18);
      const __m512 vk18x0123456789ABCDEF = _mm512_load_ps(w + 304);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi19x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i19);
      const __m512 vk19x0123456789ABCDEF = _mm512_load_ps(w + 320);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi20x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i20);
      const __m512 vk20x0123456789ABCDEF = _mm512_load_ps(w + 336);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi21x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i21);
      const __m512 vk21x0123456789ABCDEF = _mm512_load_ps(w + 352);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi22x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i22);
      const __m512 vk22x0123456789ABCDEF = _mm512_load_ps(w + 368);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      const __m512 vi23x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i23);
      const __m512 vk23x0123456789ABCDEF = _mm512_load_ps(w + 384);
      vacc0123456789ABCDEFp1 = _mm512_fmadd_ps(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF, vacc0123456789ABCDEFp1);

      const __m512 vi24x0123456789ABCDEF = _mm512_maskz_loadu_ps(vmask, i24);
      const __m512 vk24x0123456789ABCDEF = _mm512_load_ps(w + 400);
      vacc0123456789ABCDEFp0 = _mm512_fmadd_ps(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF, vacc0123456789ABCDEFp0);

      // Add up all accumulators to vacc0123456789ABCDEFp0
      vacc0123456789ABCDEFp0 = _mm512_add_ps(vacc0123456789ABCDEFp0, vacc0123456789ABCDEFp1);

      __m512 vacc0123456789ABCDEF = _mm512_max_ps(vacc0123456789ABCDEFp0, vmin);
      vacc0123456789ABCDEF = _mm512_min_ps(vacc0123456789ABCDEF, vmax);

      _mm512_mask_storeu_ps(output, vmask, vacc0123456789ABCDEF);
      output += c;
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}