// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_ukernel_8x8__fma3_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (8 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

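  // Compute the output row pointers. When mr < 8, each out-of-range row
  // pointer is clamped to the previous row, so stores for the unused rows
  // become harmless duplicates of an in-range row.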
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

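  // Outer loop: each iteration produces one block of up to 8 output columns.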
  do {
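    // Initialize all 8 accumulator rows from w; the packed weights begin
    // each column block with the bias values.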
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    __m256 vacc6x01234567 = vacc0x01234567;
    __m256 vacc7x01234567 = vacc0x01234567;
    w += 8;

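    // Walk the indirection buffer: ks bytes of input row pointers are
    // consumed per output tile, 8 pointers at a time.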
    size_t p = ks;
    do {
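      // Fetch the 8 input row pointers for this step. Real rows are shifted
      // by a_offset; the sentinel zero pointer is left untouched, so padding
      // taps read from the zero buffer.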
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      const float* restrict a6 = a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const float*) ((uintptr_t) a6 + a_offset);
      }
      const float* restrict a7 = a[7];
      assert(a7 != NULL);
      if XNN_UNPREDICTABLE(a7 != zero) {
        a7 = (const float*) ((uintptr_t) a7 + a_offset);
      }
      a += 8;

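      // Inner loop over kc: each step loads 8 weights, broadcasts one
      // element from each of the 8 input rows, and issues 8 FMAs.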
      size_t k = kc;
      do {
        const __m256 vb01234567 = _mm256_load_ps(w);
        w += 8;

        const __m256 va0 = _mm256_broadcast_ss(a0);
        a0 += 1;
        const __m256 va1 = _mm256_broadcast_ss(a1);
        a1 += 1;
        const __m256 va2 = _mm256_broadcast_ss(a2);
        a2 += 1;
        const __m256 va3 = _mm256_broadcast_ss(a3);
        a3 += 1;
        const __m256 va4 = _mm256_broadcast_ss(a4);
        a4 += 1;
        const __m256 va5 = _mm256_broadcast_ss(a5);
        a5 += 1;
        const __m256 va6 = _mm256_broadcast_ss(a6);
        a6 += 1;
        const __m256 va7 = _mm256_broadcast_ss(a7);
        a7 += 1;

        vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
        vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
        vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
        vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
        vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
        vacc5x01234567 = _mm256_fmadd_ps(va5, vb01234567, vacc5x01234567);
        vacc6x01234567 = _mm256_fmadd_ps(va6, vb01234567, vacc6x01234567);
        vacc7x01234567 = _mm256_fmadd_ps(va7, vb01234567, vacc7x01234567);
        k -= sizeof(float);
      } while (k != 0);
      p -= 8 * sizeof(void*);
    } while (p != 0);

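    // Clamp the accumulators to the [min, max] output range from params.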
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);
    vacc7x01234567 = _mm256_min_ps(vacc7x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);
    vacc7x01234567 = _mm256_max_ps(vacc7x01234567, vmin);

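    // Full-tile store: write all 8 columns of every row (in reverse row
    // order), advance the column pointers, and rewind the indirection
    // pointer by ks for the next column block.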
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c7, vacc7x01234567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
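      // Partial tile: store the remaining 1-7 columns by peeling off 4, 2,
      // and 1 columns from 128-bit lanes, according to the bits of nc.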
      __m128 vacc7x0123 = _mm256_castps256_ps128(vacc7x01234567);
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c7, vacc7x0123);
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc7x0123 = _mm256_extractf128_ps(vacc7x01234567, 1);
        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c7 += 4;
        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c7, vacc7x0123);
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc7x0123 = _mm_movehl_ps(vacc7x0123, vacc7x0123);
        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c7 += 2;
        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c7, vacc7x0123);
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}