// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
10#include <assert.h>
11
12#include <arm_neon.h>
13
14#include <xnnpack/gemm.h>
15#include <xnnpack/math.h>
16
17
// QS8 indirect GEMM (IGEMM) microkernel: 3 rows x 16 columns, NEON VMULL.
//
// Layout "c2s4": weights are packed in groups of 2 K-elements ("c2") and the
// activation vector is rotated ("shuffled") by 2 bytes between the 4 groups
// ("s4"), so one 8-byte activation load serves 4 multiply passes (c0..c3).
//
// mr        - number of rows actually computed (1..3)
// nc        - number of output columns remaining
// kc        - number of K elements per indirection entry
// ks        - indirection buffer stride, in bytes; multiple of 3 pointers
// a         - indirection buffer: ks/(3*sizeof(void*)) groups of 3 row pointers
// w         - packed weights: per 16-column tile, 16 int32 biases followed by
//             interleaved int8 weight groups
// c         - output, mr x nc int8 tile with row stride cm_stride
// a_offset  - byte offset added to every non-zero indirection pointer
// zero      - sentinel pointer: rows equal to it are zero-padding rows and are
//             NOT offset by a_offset
// params    - rndnu requantization constants (shifts, multiplier, zero point,
//             output clamp bounds)
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2s4__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers; rows beyond mr alias the previous row so their
  // (redundant) stores stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Initialize all 3 rows of accumulators from the packed bias (row 0 loads,
    // rows 1-2 copy).
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc1x89AB = vacc0x89AB;
    int32x4_t vacc1xCDEF = vacc0xCDEF;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc2x89AB = vacc0x89AB;
    int32x4_t vacc2xCDEF = vacc0xCDEF;

    size_t p = ks;
    do {
      // Fetch the next group of 3 row pointers from the indirection buffer;
      // real rows are rebased by a_offset, the zero-padding row is not.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;


      // Main K loop: 8 K-elements per iteration. Each activation register is
      // used for 4 passes (c0..c3), rotated left by 2 bytes between passes to
      // line up with the c2-interleaved weight packing.
      while (k >= 8 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Pass c0: widening 8x8->16x8 multiply, then pairwise-accumulate into
        // the int32 accumulators (sums adjacent K pairs).
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
        int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1x0);
        int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
        int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
        int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1x0);
        int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
        // Rotate activations by one K pair (2 bytes) for pass c1.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
        int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1x0);
        int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
        int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
        int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1x0);
        int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
        // Rotate for pass c2.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
        int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1x0);
        int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
        int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
        int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1x0);
        int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
        // Rotate for pass c3.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
        int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1x0);
        int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2x0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
        int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
        int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1x0);
        int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2x0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);

        k -= 8 * sizeof(int8_t);
      }

      // Remainder: 1..7 K-elements left. Process them one K pair at a time;
      // each pair broadcasts a 16-bit lane of the activation register.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)));
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)));
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)));
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)));
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)));
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)));
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)));
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)));
          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)));
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)));
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)));
          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)));
          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);

          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)));
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)));
            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)));
            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)));
            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)));
            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)));
            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);

            if (k > 6 * sizeof(int8_t)) {
              // Renamed from "...c2" to "...c3": the previous names shadowed
              // the c2 locals of the enclosing scope. Behavior is unchanged
              // (lane 3 was already used); the naming now matches the pass.
              const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
              const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
              const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
              const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

              const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
              vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
              const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
              vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
              const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
              vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
              const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)));
              vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
              const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
              vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
              const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
              vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
              const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
              vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
              const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)));
              vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
              const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3)));
              vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
              const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3)));
              vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
              const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3)));
              vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
              const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3)));
              vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);
            }
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Requantization (rndnu): pre-shift, saturating-doubling high multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);

    // Narrow int32 -> int16 (adding the output zero point) -> int8, with
    // saturation at each step.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column tile: store all rows (highest row first, matching the
      // IGEMM convention), advance column pointers, rewind the indirection
      // buffer for the next column tile.
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial tile: store 8/4/2/1 columns from progressively shifted
      // vectors. Unaligned narrow stores go through lane stores on a pointer
      // declared 1-byte aligned.
      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c2, vout2x01234567); c2 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1_lane_u32(__builtin_assume_aligned(c2, 1), vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c1, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32(__builtin_assume_aligned(c0, 1), vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16(__builtin_assume_aligned(c2, 1), vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c1, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16(__builtin_assume_aligned(c0, 1), vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}