// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemminc_ukernel_7x8__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const float*restrict acc,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 7);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  assert(acc != NULL);

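  // Set up an input row pointer (aN) and an output row pointer (cN) for each of the
  // up to 7 rows in the tile. When mr < 7, the unused pointers alias the last valid
  // row, so out-of-range rows redundantly recompute that row instead of reading or
  // writing out of bounds.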
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const float* a6 = (const float*) ((uintptr_t) a5 + a_stride);
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }

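  // Outer loop over column blocks of the output: each iteration computes and stores
  // an mr x (up to 8) tile of C.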
  do {
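    // Load the 7x8 accumulator tile from the acc buffer. Unlike the plain GEMM
    // micro-kernel, GEMMINC starts from these partial sums (accumulated over a
    // previous slice of K) instead of from packed bias values.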
    __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
    __m256 vacc1x01234567 = _mm256_load_ps(acc + 8);
    __m256 vacc2x01234567 = _mm256_load_ps(acc + 16);
    __m256 vacc3x01234567 = _mm256_load_ps(acc + 24);
    __m256 vacc4x01234567 = _mm256_load_ps(acc + 32);
    __m256 vacc5x01234567 = _mm256_load_ps(acc + 40);
    __m256 vacc6x01234567 = _mm256_load_ps(acc + 48);
    acc += 56;

    size_t k = kc;
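    // Inner loop over K: broadcast one float from each A row, load one 8-wide row of
    // packed weights, and accumulate the outer product. Plain AVX has no FMA, so each
    // update is a separate multiply and add.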
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;
      const __m256 va6 = _mm256_broadcast_ss(a6);
      a6 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
      vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
      vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
      vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
      vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
      vacc6x01234567 = _mm256_add_ps(vacc6x01234567, _mm256_mul_ps(va6, vb01234567));

      k -= sizeof(float);
    } while (k != 0);

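    // Apply the output activation: clamp every accumulator into [min, max] using the
    // bounds broadcast from params.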
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);
    vacc6x01234567 = _mm256_min_ps(vacc6x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);
    vacc6x01234567 = _mm256_max_ps(vacc6x01234567, vmin);

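    // Common case: at least 8 columns remain, so store full 8-wide rows, advance the
    // C pointers by cn_stride to the next column block, and rewind the A pointers by
    // kc so the same input rows are re-read on the next iteration.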
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c6, vacc6x01234567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a6 = (const float*) ((uintptr_t) a6 - kc);
      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
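      // Remainder: fewer than 8 columns are left. Store them with progressively
      // narrower writes (4, 2, then 1 float), shifting the remaining lanes down
      // after each partial store.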
      __m128 vacc6x0123 = _mm256_castps256_ps128(vacc6x01234567);
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c6, vacc6x0123);
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc6x0123 = _mm256_extractf128_ps(vacc6x01234567, 1);
        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c6 += 4;
        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c6, vacc6x0123);
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc6x0123 = _mm_movehl_ps(vacc6x0123, vacc6x0123);
        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c6 += 2;
        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c6, vacc6x0123);
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}