// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>


void xnn_f32_igemm_ukernel_6x8__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

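  // Compute pointers to each of the 6 output rows. When fewer than 6 rows
  // remain (mr < 6), out-of-range pointers alias the preceding valid row, so
  // their stores are harmlessly redundant.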
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

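  // Process the output 8 columns at a time until all nc columns are written.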
  do {
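    // Initialize all 6 row accumulators from the packed weights: XNNPack's
    // weight packing places the bias for these 8 output channels first.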
    __m256 vacc0x01234567 = _mm256_load_ps(w);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w += 8;

    size_t p = ks;
    do {
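      // Fetch the next 6 input row pointers from the indirection buffer.
      // Pointers into real input data are adjusted by a_offset; a pointer
      // equal to `zero` references the shared zero buffer and stays as-is.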
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

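      // Reduce over the kc input channels, one float per row per step.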
      size_t k = kc;
      do {
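        // Load 8 packed weights, broadcast one element from each input row,
        // and accumulate with a separate multiply and add (plain AVX, no FMA).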
        const __m256 vb01234567 = _mm256_load_ps(w);
        w += 8;

        const __m256 va0 = _mm256_broadcast_ss(a0);
        a0 += 1;
        const __m256 va1 = _mm256_broadcast_ss(a1);
        a1 += 1;
        const __m256 va2 = _mm256_broadcast_ss(a2);
        a2 += 1;
        const __m256 va3 = _mm256_broadcast_ss(a3);
        a3 += 1;
        const __m256 va4 = _mm256_broadcast_ss(a4);
        a4 += 1;
        const __m256 va5 = _mm256_broadcast_ss(a5);
        a5 += 1;

        vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
        vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
        vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
        vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
        vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
        vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));
        k -= sizeof(float);
      } while (k != 0);
      p -= 6 * sizeof(void*);
    } while (p != 0);

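    // Clamp the accumulators to [min, max]: the bounds are stored as 128-bit
    // values in params and broadcast to both halves of a 256-bit register.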
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);

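    // Common case: a full 8-column tile remains. Store all 8 lanes of every
    // row and advance each output pointer by cn_stride.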
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

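      // Rewind the indirection buffer so the same 6 input rows are reused for
      // the next 8-column tile.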
      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
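      // Remainder: 1-7 columns left. Store progressively narrower pieces (4,
      // 2, then 1 column) according to the bits set in nc.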
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}