// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/avx-broadcast.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>


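// Computes a row-major tile C[mr][nc] = A[mr][kc] * B + bias, up to 6 rows of
// A and 8 columns of B per outer iteration, then clamps every result to the
// [min, max] range in params. B and the bias are pre-packed into w.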
void xnn_f32_gemm_ukernel_6x8__avx_broadcast(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

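  // Set up one input (a*) and one output (c*) pointer per row. When mr < 6,
  // pointers for the unused rows alias the last valid row: those rows are
  // computed redundantly and their stores land on top of each other, which
  // keeps the main loop below branch-free.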
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  do {
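    // The packed weights start with 8 bias values for this column block;
    // replicate them into the accumulators of all 6 rows. The aligned
    // _mm256_load_ps requires w to be 32-byte aligned.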
    __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
    __m256 vacc1x01234567 = vacc0x01234567;
    __m256 vacc2x01234567 = vacc0x01234567;
    __m256 vacc3x01234567 = vacc0x01234567;
    __m256 vacc4x01234567 = vacc0x01234567;
    __m256 vacc5x01234567 = vacc0x01234567;
    w += 8;

    size_t k = kc;
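    // Main accumulation loop: each iteration broadcasts one scalar of A per
    // row and multiplies it by one packed 8-wide row of B. Plain AVX has no
    // fused multiply-add, so every update is a separate mul followed by add.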
    do {
      const __m256 va0 = _mm256_broadcast_ss(a0);
      a0 += 1;
      const __m256 va1 = _mm256_broadcast_ss(a1);
      a1 += 1;
      const __m256 va2 = _mm256_broadcast_ss(a2);
      a2 += 1;
      const __m256 va3 = _mm256_broadcast_ss(a3);
      a3 += 1;
      const __m256 va4 = _mm256_broadcast_ss(a4);
      a4 += 1;
      const __m256 va5 = _mm256_broadcast_ss(a5);
      a5 += 1;

      const __m256 vb01234567 = _mm256_load_ps(w);
      w += 8;

      vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
      vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
      vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
      vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
      vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
      vacc5x01234567 = _mm256_add_ps(vacc5x01234567, _mm256_mul_ps(va5, vb01234567));

      k -= sizeof(float);
    } while (k != 0);

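    // Clamp the accumulators to [min, max]: _mm256_min_ps enforces the upper
    // bound and _mm256_max_ps the lower. The bounds live in params as 128-bit
    // values and are broadcast to both halves of a 256-bit register.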
    const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
    vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
    vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
    vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
    vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
    vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
    vacc5x01234567 = _mm256_min_ps(vacc5x01234567, vmax);

    const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
    vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
    vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
    vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
    vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
    vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
    vacc5x01234567 = _mm256_max_ps(vacc5x01234567, vmin);

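    // Common case: a full 8-column tile. Store all 6 rows, advance the output
    // pointers to the next column tile, and rewind the input pointers by kc
    // so the same A rows are re-read against the next packed block of B.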
    if XNN_LIKELY(nc >= 8) {
      _mm256_storeu_ps(c5, vacc5x01234567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      _mm256_storeu_ps(c4, vacc4x01234567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      _mm256_storeu_ps(c3, vacc3x01234567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      _mm256_storeu_ps(c2, vacc2x01234567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      _mm256_storeu_ps(c1, vacc1x01234567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      _mm256_storeu_ps(c0, vacc0x01234567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a5 = (const float*) ((uintptr_t) a5 - kc);
      a4 = (const float*) ((uintptr_t) a4 - kc);
      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;
    } else {
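      // Remainder tile of 1-7 columns: store the low 128-bit half first,
      // then shift the surviving lanes down and peel off 4, 2, and 1 columns
      // according to the bits of nc. This is the final tile, so nc goes to 0.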
      __m128 vacc5x0123 = _mm256_castps256_ps128(vacc5x01234567);
      __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
      __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
      __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
      __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
      __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
      if (nc & 4) {
        _mm_storeu_ps(c5, vacc5x0123);
        _mm_storeu_ps(c4, vacc4x0123);
        _mm_storeu_ps(c3, vacc3x0123);
        _mm_storeu_ps(c2, vacc2x0123);
        _mm_storeu_ps(c1, vacc1x0123);
        _mm_storeu_ps(c0, vacc0x0123);

        vacc5x0123 = _mm256_extractf128_ps(vacc5x01234567, 1);
        vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
        vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
        vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
        vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
        vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);

        c5 += 4;
        c4 += 4;
        c3 += 4;
        c2 += 4;
        c1 += 4;
        c0 += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c5, vacc5x0123);
        _mm_storel_pi((__m64*) c4, vacc4x0123);
        _mm_storel_pi((__m64*) c3, vacc3x0123);
        _mm_storel_pi((__m64*) c2, vacc2x0123);
        _mm_storel_pi((__m64*) c1, vacc1x0123);
        _mm_storel_pi((__m64*) c0, vacc0x0123);

        vacc5x0123 = _mm_movehl_ps(vacc5x0123, vacc5x0123);
        vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
        vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
        vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
        vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
        vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);

        c5 += 2;
        c4 += 2;
        c3 += 2;
        c2 += 2;
        c1 += 2;
        c0 += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c5, vacc5x0123);
        _mm_store_ss(c4, vacc4x0123);
        _mm_store_ss(c3, vacc3x0123);
        _mm_store_ss(c2, vacc2x0123);
        _mm_store_ss(c1, vacc1x0123);
        _mm_store_ss(c0, vacc0x0123);
      }

      nc = 0;
    }
  } while (nc != 0);
}