// Auto-generated file. Do not edit!
//   Template: src/f32-ppmm/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/ppmm.h>

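// PPMM micro-kernel: a GEMM variant that operates on a pre-packed A panel
// (in addition to the usual packed weights w), so each iteration of the K
// loop can load the 8 A values for the current k column contiguously.
// Computes an up-to-8-row by 8-column tile of C with min/max output clamping.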
void xnn_f32_ppmm_ukernel_8x8__neonfma(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);

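  // One output pointer per row of the tile. When mr < 8, the pointers for
  // the out-of-range rows are aliased to the previous row, so their stores
  // become harmless duplicate writes instead of out-of-bounds accesses.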
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

  do {
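    // Initialize all accumulators of the 8x8 tile from the first 8 values of
    // the packed weights w (the bias, replicated into every row of the tile).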
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;
    float32x4_t vacc6x0123 = vacc0x0123;
    float32x4_t vacc6x4567 = vacc0x4567;
    float32x4_t vacc7x0123 = vacc0x0123;
    float32x4_t vacc7x4567 = vacc0x4567;

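    // Reduction over K: each iteration consumes 8 packed A values (one per
    // row of the tile) and 8 packed B values (one per column) and accumulates
    // their outer product. kc counts bytes, hence the sizeof(float) decrement.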
    size_t k = kc;
    do {
      const float32x4_t va0123 = vld1q_f32(a); a += 4;
      const float32x4_t va4567 = vld1q_f32(a); a += 4;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

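      // On AArch64, FMA can reference a lane of a full 128-bit register
      // directly (vfmaq_laneq_f32); 32-bit ARM cannot, so there each A lane
      // is broadcast with vdupq_lane_f32 before a plain vfmaq_f32.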
      #ifdef __aarch64__
        vacc0x0123 = vfmaq_laneq_f32(vacc0x0123, vb0123, va0123, 0);
        vacc1x0123 = vfmaq_laneq_f32(vacc1x0123, vb0123, va0123, 1);
        vacc2x0123 = vfmaq_laneq_f32(vacc2x0123, vb0123, va0123, 2);
        vacc3x0123 = vfmaq_laneq_f32(vacc3x0123, vb0123, va0123, 3);
        vacc4x0123 = vfmaq_laneq_f32(vacc4x0123, vb0123, va4567, 0);
        vacc5x0123 = vfmaq_laneq_f32(vacc5x0123, vb0123, va4567, 1);
        vacc6x0123 = vfmaq_laneq_f32(vacc6x0123, vb0123, va4567, 2);
        vacc7x0123 = vfmaq_laneq_f32(vacc7x0123, vb0123, va4567, 3);
        vacc0x4567 = vfmaq_laneq_f32(vacc0x4567, vb4567, va0123, 0);
        vacc1x4567 = vfmaq_laneq_f32(vacc1x4567, vb4567, va0123, 1);
        vacc2x4567 = vfmaq_laneq_f32(vacc2x4567, vb4567, va0123, 2);
        vacc3x4567 = vfmaq_laneq_f32(vacc3x4567, vb4567, va0123, 3);
        vacc4x4567 = vfmaq_laneq_f32(vacc4x4567, vb4567, va4567, 0);
        vacc5x4567 = vfmaq_laneq_f32(vacc5x4567, vb4567, va4567, 1);
        vacc6x4567 = vfmaq_laneq_f32(vacc6x4567, vb4567, va4567, 2);
        vacc7x4567 = vfmaq_laneq_f32(vacc7x4567, vb4567, va4567, 3);
      #else
        const float32x4_t va0000 = vdupq_lane_f32(vget_low_f32(va0123), 0);
        const float32x4_t va1111 = vdupq_lane_f32(vget_low_f32(va0123), 1);
        const float32x4_t va2222 = vdupq_lane_f32(vget_high_f32(va0123), 0);
        const float32x4_t va3333 = vdupq_lane_f32(vget_high_f32(va0123), 1);
        const float32x4_t va4444 = vdupq_lane_f32(vget_low_f32(va4567), 0);
        const float32x4_t va5555 = vdupq_lane_f32(vget_low_f32(va4567), 1);
        const float32x4_t va6666 = vdupq_lane_f32(vget_high_f32(va4567), 0);
        const float32x4_t va7777 = vdupq_lane_f32(vget_high_f32(va4567), 1);

        vacc0x0123 = vfmaq_f32(vacc0x0123, va0000, vb0123);
        vacc1x0123 = vfmaq_f32(vacc1x0123, va1111, vb0123);
        vacc2x0123 = vfmaq_f32(vacc2x0123, va2222, vb0123);
        vacc3x0123 = vfmaq_f32(vacc3x0123, va3333, vb0123);
        vacc4x0123 = vfmaq_f32(vacc4x0123, va4444, vb0123);
        vacc5x0123 = vfmaq_f32(vacc5x0123, va5555, vb0123);
        vacc6x0123 = vfmaq_f32(vacc6x0123, va6666, vb0123);
        vacc7x0123 = vfmaq_f32(vacc7x0123, va7777, vb0123);
        vacc0x4567 = vfmaq_f32(vacc0x4567, va0000, vb4567);
        vacc1x4567 = vfmaq_f32(vacc1x4567, va1111, vb4567);
        vacc2x4567 = vfmaq_f32(vacc2x4567, va2222, vb4567);
        vacc3x4567 = vfmaq_f32(vacc3x4567, va3333, vb4567);
        vacc4x4567 = vfmaq_f32(vacc4x4567, va4444, vb4567);
        vacc5x4567 = vfmaq_f32(vacc5x4567, va5555, vb4567);
        vacc6x4567 = vfmaq_f32(vacc6x4567, va6666, vb4567);
        vacc7x4567 = vfmaq_f32(vacc7x4567, va7777, vb4567);
      #endif

      k -= sizeof(float);
    } while (k != 0);

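    // Clamp the results to the [min, max] output range supplied in params:
    // first the upper bound, then the lower bound.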
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
    vacc7x4567 = vminq_f32(vacc7x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);

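    // Common case: a full 8-column tile. Store all rows, step each output
    // pointer to the next tile, and rewind a by 8*kc bytes to the start of
    // the packed A panel so it can be re-read for the next 8 columns.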
    if XNN_LIKELY(nc >= 8) {
      vst1q_f32(c7, vacc7x0123);
      vst1q_f32(c7 + 4, vacc7x4567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      vst1q_f32(c6, vacc6x0123);
      vst1q_f32(c6 + 4, vacc6x4567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float*) ((uintptr_t) a - kc * 8);

      nc -= 8;
    } else {
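      // Final partial tile: store 4, 2, and then 1 column(s) according to
      // the bits of nc, shifting the surviving lanes down after each store.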
      if (nc & 4) {
        vst1q_f32(c7, vacc7x0123); c7 += 4;
        vst1q_f32(c6, vacc6x0123); c6 += 4;
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc7x0123 = vacc7x4567;
        vacc6x0123 = vacc6x4567;
        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c7, vacc7x01); c7 += 2;
        vst1_f32(c6, vacc6x01); c6 += 2;
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc7x01 = vget_high_f32(vacc7x0123);
        vacc6x01 = vget_high_f32(vacc6x0123);
        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c7, vacc7x01, 0);
        vst1_lane_f32(c6, vacc6x01, 0);
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}