// Auto-generated file. Do not edit!
//   Template: src/f32-gemm/neon-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>


void xnn_f32_gemm_ukernel_4x8__neon_dup_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f32_minmax_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

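  // Set up per-row input (a0..a3) and output (c0..c3) pointers. When fewer than
  // 4 rows are requested (mr < 4), the out-of-range rows alias the previous row,
  // so the same code path computes redundant but harmless results for them.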
  const float* a0 = a;
  float* c0 = c;
  const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

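  // Outer loop: produce the output in blocks of up to 8 columns (NR = 8).
  // Each block of the packed weights w begins with 8 bias values, which seed
  // the row-0 accumulators; the remaining rows copy them.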
  do {
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;

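    // Main loop: consume 2 k-elements (a 64-bit load) per row of A each
    // iteration. Each scalar of the 2-element load is duplicated across a full
    // vector (the "dup" variant) and multiplied against two 4-wide vectors of B.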
    size_t k = kc;
    for (; k >= 2 * sizeof(float); k -= 2 * sizeof(float)) {
      const float32x2_t va0 = vld1_f32(a0); a0 += 2;
      const float32x2_t va1 = vld1_f32(a1); a1 += 2;
      const float32x2_t va2 = vld1_f32(a2); a2 += 2;
      const float32x2_t va3 = vld1_f32(a3); a3 += 2;

      const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;

      const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
      const float32x4_t va1c0 = vdupq_lane_f32(va1, 0);
      const float32x4_t va2c0 = vdupq_lane_f32(va2, 0);
      const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c0, vb0123c0);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c0, vb0123c0);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c0, vb4567c0);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c0, vb4567c0);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
      const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;

      const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
      const float32x4_t va1c1 = vdupq_lane_f32(va1, 1);
      const float32x4_t va2c1 = vdupq_lane_f32(va2, 1);
      const float32x4_t va3c1 = vdupq_lane_f32(va3, 1);
      vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1c1, vb0123c1);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2c1, vb0123c1);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3c1, vb0123c1);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1c1, vb4567c1);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2c1, vb4567c1);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3c1, vb4567c1);
    }
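    // Remainder: handle the final k-element when kc is an odd number of floats.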
    if XNN_UNLIKELY(k != 0) {
      const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
      const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
      const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
      const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;

      const float32x4_t vb0123 = vld1q_f32(w); w += 4;
      const float32x4_t vb4567 = vld1q_f32(w); w += 4;

      vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
      vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
      vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
      vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
      vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
      vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
      vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
      vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
    }
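    // Clamp the accumulators to the output range [min, max] given in params.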
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);

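    // Full-tile store: write all 8 columns for each row, advance the output
    // pointers by cn_stride, and rewind the input pointers by kc so the next
    // column block reuses the same rows of A.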
    if XNN_LIKELY(nc >= 8) {
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a3 = (const float*) ((uintptr_t) a3 - kc);
      a2 = (const float*) ((uintptr_t) a2 - kc);
      a1 = (const float*) ((uintptr_t) a1 - kc);
      a0 = (const float*) ((uintptr_t) a0 - kc);

      nc -= 8;

    } else {
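      // Partial-tile store: write the remaining nc (< 8) columns using
      // progressively narrower stores of 4, 2, and 1 floats.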
      if (nc & 4) {
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}