// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/psimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

10#include <assert.h>
11
12#include <psimd.h>
13
14#include <xnnpack/gemm.h>
15
16
17void xnn_f32_gemm_ukernel_6x8__psimd_loadsplat(
18 size_t mr,
19 size_t nc,
20 size_t kc,
21 const float*restrict a,
22 size_t a_stride,
23 const float*restrict w,
24 float*restrict c,
25 size_t cm_stride,
26 size_t cn_stride,
Marat Dukhaneb09a6b2020-04-08 17:34:32 -070027 const union xnn_f32_minmax_params params[restrict static 1])
XNNPACK Teamb455b122019-09-27 18:10:33 -070028{
29 assert(mr != 0);
30 assert(mr <= 6);
31 assert(nc != 0);
32 assert(kc != 0);
33 assert(kc % sizeof(float) == 0);
34 assert(a != NULL);
35 assert(w != NULL);
36 assert(c != NULL);
37
38 const float* a0 = a;
39 float* c0 = c;
40 const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
41 float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
42 if XNN_UNPREDICTABLE(mr < 2) {
43 a1 = a0;
44 c1 = c0;
45 }
46 const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
47 float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
48 if XNN_UNPREDICTABLE(mr <= 2) {
49 a2 = a1;
50 c2 = c1;
51 }
52 const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
54 if XNN_UNPREDICTABLE(mr < 4) {
55 a3 = a2;
56 c3 = c2;
57 }
58 const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
59 float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
60 if XNN_UNPREDICTABLE(mr <= 4) {
61 a4 = a3;
62 c4 = c3;
63 }
64 const float* a5 = (const float*) ((uintptr_t) a4 + a_stride);
65 float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
66 if XNN_UNPREDICTABLE(mr != 6) {
67 a5 = a4;
68 c5 = c4;
69 }
70
71 do {
72 psimd_f32 vacc0x0123 = psimd_load_f32(w + 0);
73 psimd_f32 vacc0x4567 = psimd_load_f32(w + 4);
74 psimd_f32 vacc1x0123 = vacc0x0123;
75 psimd_f32 vacc1x4567 = vacc0x4567;
76 psimd_f32 vacc2x0123 = vacc0x0123;
77 psimd_f32 vacc2x4567 = vacc0x4567;
78 psimd_f32 vacc3x0123 = vacc0x0123;
79 psimd_f32 vacc3x4567 = vacc0x4567;
80 psimd_f32 vacc4x0123 = vacc0x0123;
81 psimd_f32 vacc4x4567 = vacc0x4567;
82 psimd_f32 vacc5x0123 = vacc0x0123;
83 psimd_f32 vacc5x4567 = vacc0x4567;
84 w += 8;
85
86 size_t k = kc;
87 do {
88 const psimd_f32 va0 = psimd_load_splat_f32(a0);
89 a0 += 1;
90 const psimd_f32 va1 = psimd_load_splat_f32(a1);
91 a1 += 1;
92 const psimd_f32 va2 = psimd_load_splat_f32(a2);
93 a2 += 1;
94 const psimd_f32 va3 = psimd_load_splat_f32(a3);
95 a3 += 1;
96 const psimd_f32 va4 = psimd_load_splat_f32(a4);
97 a4 += 1;
98 const psimd_f32 va5 = psimd_load_splat_f32(a5);
99 a5 += 1;
100
101 const psimd_f32 vb0123 = psimd_load_f32(w);
102 const psimd_f32 vb4567 = psimd_load_f32(w + 4);
103 w += 8;
104
105 vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0, vb0123);
106 vacc1x0123 = psimd_qfma_f32(vacc1x0123, va1, vb0123);
107 vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2, vb0123);
108 vacc3x0123 = psimd_qfma_f32(vacc3x0123, va3, vb0123);
109 vacc4x0123 = psimd_qfma_f32(vacc4x0123, va4, vb0123);
110 vacc5x0123 = psimd_qfma_f32(vacc5x0123, va5, vb0123);
111 vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0, vb4567);
112 vacc1x4567 = psimd_qfma_f32(vacc1x4567, va1, vb4567);
113 vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2, vb4567);
114 vacc3x4567 = psimd_qfma_f32(vacc3x4567, va3, vb4567);
115 vacc4x4567 = psimd_qfma_f32(vacc4x4567, va4, vb4567);
116 vacc5x4567 = psimd_qfma_f32(vacc5x4567, va5, vb4567);
117
118 k -= sizeof(float);
119 } while (k != 0);
120
121 const psimd_f32 vmax = psimd_load_splat_f32(&params->scalar.max);
122 vacc0x0123 = psimd_min_f32(vacc0x0123, vmax);
123 vacc1x0123 = psimd_min_f32(vacc1x0123, vmax);
124 vacc2x0123 = psimd_min_f32(vacc2x0123, vmax);
125 vacc3x0123 = psimd_min_f32(vacc3x0123, vmax);
126 vacc4x0123 = psimd_min_f32(vacc4x0123, vmax);
127 vacc5x0123 = psimd_min_f32(vacc5x0123, vmax);
128 vacc0x4567 = psimd_min_f32(vacc0x4567, vmax);
129 vacc1x4567 = psimd_min_f32(vacc1x4567, vmax);
130 vacc2x4567 = psimd_min_f32(vacc2x4567, vmax);
131 vacc3x4567 = psimd_min_f32(vacc3x4567, vmax);
132 vacc4x4567 = psimd_min_f32(vacc4x4567, vmax);
133 vacc5x4567 = psimd_min_f32(vacc5x4567, vmax);
134
135 const psimd_f32 vmin = psimd_load_splat_f32(&params->scalar.min);
136 vacc0x0123 = psimd_max_f32(vacc0x0123, vmin);
137 vacc1x0123 = psimd_max_f32(vacc1x0123, vmin);
138 vacc2x0123 = psimd_max_f32(vacc2x0123, vmin);
139 vacc3x0123 = psimd_max_f32(vacc3x0123, vmin);
140 vacc4x0123 = psimd_max_f32(vacc4x0123, vmin);
141 vacc5x0123 = psimd_max_f32(vacc5x0123, vmin);
142 vacc0x4567 = psimd_max_f32(vacc0x4567, vmin);
143 vacc1x4567 = psimd_max_f32(vacc1x4567, vmin);
144 vacc2x4567 = psimd_max_f32(vacc2x4567, vmin);
145 vacc3x4567 = psimd_max_f32(vacc3x4567, vmin);
146 vacc4x4567 = psimd_max_f32(vacc4x4567, vmin);
147 vacc5x4567 = psimd_max_f32(vacc5x4567, vmin);
148
149 if XNN_LIKELY(nc >= 8) {
150 psimd_store_f32(c5, vacc5x0123);
151 psimd_store_f32(c5 + 4, vacc5x4567);
152 c5 = (float*) ((uintptr_t) c5 + cn_stride);
153 psimd_store_f32(c4, vacc4x0123);
154 psimd_store_f32(c4 + 4, vacc4x4567);
155 c4 = (float*) ((uintptr_t) c4 + cn_stride);
156 psimd_store_f32(c3, vacc3x0123);
157 psimd_store_f32(c3 + 4, vacc3x4567);
158 c3 = (float*) ((uintptr_t) c3 + cn_stride);
159 psimd_store_f32(c2, vacc2x0123);
160 psimd_store_f32(c2 + 4, vacc2x4567);
161 c2 = (float*) ((uintptr_t) c2 + cn_stride);
162 psimd_store_f32(c1, vacc1x0123);
163 psimd_store_f32(c1 + 4, vacc1x4567);
164 c1 = (float*) ((uintptr_t) c1 + cn_stride);
165 psimd_store_f32(c0, vacc0x0123);
166 psimd_store_f32(c0 + 4, vacc0x4567);
167 c0 = (float*) ((uintptr_t) c0 + cn_stride);
168
169 a5 = (const float*) ((uintptr_t) a5 - kc);
170 a4 = (const float*) ((uintptr_t) a4 - kc);
171 a3 = (const float*) ((uintptr_t) a3 - kc);
172 a2 = (const float*) ((uintptr_t) a2 - kc);
173 a1 = (const float*) ((uintptr_t) a1 - kc);
174 a0 = (const float*) ((uintptr_t) a0 - kc);
175
176 nc -= 8;
177 } else {
178 if (nc & 4) {
179 psimd_store_f32(c5, vacc5x0123);
180 psimd_store_f32(c4, vacc4x0123);
181 psimd_store_f32(c3, vacc3x0123);
182 psimd_store_f32(c2, vacc2x0123);
183 psimd_store_f32(c1, vacc1x0123);
184 psimd_store_f32(c0, vacc0x0123);
185
186 vacc5x0123 = vacc5x4567;
187 vacc4x0123 = vacc4x4567;
188 vacc3x0123 = vacc3x4567;
189 vacc2x0123 = vacc2x4567;
190 vacc1x0123 = vacc1x4567;
191 vacc0x0123 = vacc0x4567;
192
193 c5 += 4;
194 c4 += 4;
195 c3 += 4;
196 c2 += 4;
197 c1 += 4;
198 c0 += 4;
199 }
200 if (nc & 2) {
201 psimd_store2_f32(c5, vacc5x0123);
202 psimd_store2_f32(c4, vacc4x0123);
203 psimd_store2_f32(c3, vacc3x0123);
204 psimd_store2_f32(c2, vacc2x0123);
205 psimd_store2_f32(c1, vacc1x0123);
206 psimd_store2_f32(c0, vacc0x0123);
207
208 vacc5x0123 = psimd_concat_hi_f32(vacc5x0123, vacc5x0123);
209 vacc4x0123 = psimd_concat_hi_f32(vacc4x0123, vacc4x0123);
210 vacc3x0123 = psimd_concat_hi_f32(vacc3x0123, vacc3x0123);
211 vacc2x0123 = psimd_concat_hi_f32(vacc2x0123, vacc2x0123);
212 vacc1x0123 = psimd_concat_hi_f32(vacc1x0123, vacc1x0123);
213 vacc0x0123 = psimd_concat_hi_f32(vacc0x0123, vacc0x0123);
214
215 c5 += 2;
216 c4 += 2;
217 c3 += 2;
218 c2 += 2;
219 c1 += 2;
220 c0 += 2;
221 }
222 if (nc & 1) {
223 psimd_store1_f32(c5, vacc5x0123);
224 psimd_store1_f32(c4, vacc4x0123);
225 psimd_store1_f32(c3, vacc3x0123);
226 psimd_store1_f32(c2, vacc2x0123);
227 psimd_store1_f32(c1, vacc1x0123);
228 psimd_store1_f32(c0, vacc0x0123);
229 }
230
231 nc = 0;
232 }
233 } while (nc != 0);
234}