// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>

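// QS8 (signed 8-bit) indirect GEMM microkernel: MR=4 rows by NR=4 columns,
// with K processed in pairs of elements ("4c2") via _mm_madd_epi16 and
// 64-bit loads of the A operand ("ld64"), targeting SSE2. The A rows come
// from an indirection buffer of ks pointer groups rather than a dense matrix.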
void xnn_qs8_igemm_minmax_ukernel_4x4c2__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

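  // Compute the output row pointers. When mr < 4, the pointers for the
  // missing rows alias the last valid row, so those rows are computed
  // redundantly but always written to valid memory.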
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

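  // Outer loop over the output columns, 4 at a time. Each iteration starts
  // the accumulators from the 4 int32 bias values at the head of the packed
  // weights w; all 4 rows share the same bias.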
  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

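    // Loop over the ks entries of the indirection buffer: each step fetches
    // 4 A-row pointers. A pointer equal to `zero` selects the shared zero
    // (padding) buffer and is deliberately not displaced by a_offset.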
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

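      // Main K loop, 8 int8 elements per iteration. Each A vector is
      // sign-extended to 8 int16 values (unpack against a computed sign
      // mask, since SSE2 lacks a sign-extend instruction). The "c2" weight
      // layout stores K in pairs: each _mm_shuffle_epi32 broadcasts one
      // pair of A values, and _mm_madd_epi16 multiplies it against 4 column
      // pairs of B, summing within each pair into 4 int32 partial dots.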
      size_t k = kc;
      while (k >= 8 * sizeof(int8_t)) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_unpacklo_epi8(va0, _mm_cmpgt_epi8(_mm_setzero_si128(), va0));
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_unpacklo_epi8(va1, _mm_cmpgt_epi8(_mm_setzero_si128(), va1));
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_unpacklo_epi8(va2, _mm_cmpgt_epi8(_mm_setzero_si128(), va2));
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_unpacklo_epi8(va3, _mm_cmpgt_epi8(_mm_setzero_si128(), va3));
        a3 += 8;

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb0, _mm_cmpgt_epi8(_mm_setzero_si128(), vb0));

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8));
        const __m128i vxb1 = _mm_unpacklo_epi8(vb1, _mm_cmpgt_epi8(_mm_setzero_si128(), vb1));

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16));
        const __m128i vxb2 = _mm_unpacklo_epi8(vb2, _mm_cmpgt_epi8(_mm_setzero_si128(), vb2));

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24));
        const __m128i vxb3 = _mm_unpacklo_epi8(vb3, _mm_cmpgt_epi8(_mm_setzero_si128(), vb3));

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

        w = (const void*) ((uintptr_t) w + 32);
        k -= 8 * sizeof(int8_t);
      }
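      // K remainder (1-7 elements): only the K pairs covered by the
      // remainder are processed. The 64-bit A load may read past the last
      // valid element; within an incomplete final pair the unused B weight
      // is expected to be zero-padded by the packing routine (an assumption
      // about the packing, not visible in this file), so stray A bytes do
      // not contribute to the accumulators.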
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_unpacklo_epi8(va0, _mm_cmpgt_epi8(_mm_setzero_si128(), va0));
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_unpacklo_epi8(va1, _mm_cmpgt_epi8(_mm_setzero_si128(), va1));
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_unpacklo_epi8(va2, _mm_cmpgt_epi8(_mm_setzero_si128(), va2));
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_unpacklo_epi8(va3, _mm_cmpgt_epi8(_mm_setzero_si128(), va3));
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((uintptr_t) w + 8);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb0, _mm_cmpgt_epi8(_mm_setzero_si128(), vb0));

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((uintptr_t) w + 8);
          const __m128i vxb1 = _mm_unpacklo_epi8(vb1, _mm_cmpgt_epi8(_mm_setzero_si128(), vb1));

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((uintptr_t) w + 8);
            const __m128i vxb2 = _mm_unpacklo_epi8(vb2, _mm_cmpgt_epi8(_mm_setzero_si128(), vb2));

            vacc0x0123 = _mm_add_epi32(vacc0x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc1x0123 = _mm_add_epi32(vacc1x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc2x0123 = _mm_add_epi32(vacc2x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc3x0123 = _mm_add_epi32(vacc3x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));

            if (k > 6 * sizeof(int8_t)) {
              const __m128i vb3 = _mm_loadl_epi64((const __m128i*) w);
              w = (const void*) ((uintptr_t) w + 8);
              const __m128i vxb3 = _mm_unpacklo_epi8(vb3, _mm_cmpgt_epi8(_mm_setzero_si128(), vb3));

              vacc0x0123 = _mm_add_epi32(vacc0x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
              vacc1x0123 = _mm_add_epi32(vacc1x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
              vacc2x0123 = _mm_add_epi32(vacc2x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
              vacc3x0123 = _mm_add_epi32(vacc3x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
            }
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

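    // Requantization: scale each int32 accumulator by the Q31 fixed-point
    // multiplier with rounding, then divide by 2^shift with rounding to
    // nearest. SSE2 has no signed 32x32->64-bit multiply, so the products
    // are computed on absolute values and the sign is restored afterwards;
    // vnmask records which lanes are negative.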
    const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
    const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

    const __m128i vnmask0x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0x0123);
    const __m128i vnmask1x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc1x0123);
    const __m128i vnmask2x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc2x0123);
    const __m128i vnmask3x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc3x0123);

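    // Two's-complement absolute value: |x| = (x XOR m) - m, where m is the
    // all-ones mask for negative lanes and zero otherwise.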
    const __m128i vabsacc0x0123 = _mm_sub_epi32(_mm_xor_si128(vacc0x0123, vnmask0x0123), vnmask0x0123);
    const __m128i vabsacc1x0123 = _mm_sub_epi32(_mm_xor_si128(vacc1x0123, vnmask1x0123), vnmask1x0123);
    const __m128i vabsacc2x0123 = _mm_sub_epi32(_mm_xor_si128(vacc2x0123, vnmask2x0123), vnmask2x0123);
    const __m128i vabsacc3x0123 = _mm_sub_epi32(_mm_xor_si128(vacc3x0123, vnmask3x0123), vnmask3x0123);

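    // _mm_mul_epu32 multiplies only the even (0 and 2) 32-bit lanes, so the
    // odd lanes (1 and 3) are first shuffled into even positions and
    // multiplied separately. Each 64-bit product is sign-restored with the
    // same XOR/subtract trick, then rounded and logically shifted right by
    // 31 to extract the Q31 result.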
    const __m128i vabsacc0x1133 = _mm_shuffle_epi32(vabsacc0x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc1x1133 = _mm_shuffle_epi32(vabsacc1x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc2x1133 = _mm_shuffle_epi32(vabsacc2x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc3x1133 = _mm_shuffle_epi32(vabsacc3x0123, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);
    const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);
    const __m128i vabsprod2x02 = _mm_mul_epu32(vabsacc2x0123, vmultiplier);
    const __m128i vabsprod3x02 = _mm_mul_epu32(vabsacc3x0123, vmultiplier);

    const __m128i vnmask0x02 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask1x02 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask2x02 = _mm_shuffle_epi32(vnmask2x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask3x02 = _mm_shuffle_epi32(vnmask3x0123, _MM_SHUFFLE(2, 2, 0, 0));

    const __m128i vprod0x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x02, vnmask0x02), vnmask0x02);
    const __m128i vprod1x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x02, vnmask1x02), vnmask1x02);
    const __m128i vprod2x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod2x02, vnmask2x02), vnmask2x02);
    const __m128i vprod3x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod3x02, vnmask3x02), vnmask3x02);

    const __m128i vq31prod0x02 = _mm_srli_epi64(_mm_add_epi64(vprod0x02, vrounding), 31);
    const __m128i vq31prod1x02 = _mm_srli_epi64(_mm_add_epi64(vprod1x02, vrounding), 31);
    const __m128i vq31prod2x02 = _mm_srli_epi64(_mm_add_epi64(vprod2x02, vrounding), 31);
    const __m128i vq31prod3x02 = _mm_srli_epi64(_mm_add_epi64(vprod3x02, vrounding), 31);

    const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1133, vmultiplier);
    const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1133, vmultiplier);
    const __m128i vabsprod2x13 = _mm_mul_epu32(vabsacc2x1133, vmultiplier);
    const __m128i vabsprod3x13 = _mm_mul_epu32(vabsacc3x1133, vmultiplier);

    const __m128i vnmask0x13 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask1x13 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask2x13 = _mm_shuffle_epi32(vnmask2x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask3x13 = _mm_shuffle_epi32(vnmask3x0123, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vprod0x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x13, vnmask0x13), vnmask0x13);
    const __m128i vprod1x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x13, vnmask1x13), vnmask1x13);
    const __m128i vprod2x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod2x13, vnmask2x13), vnmask2x13);
    const __m128i vprod3x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod3x13, vnmask3x13), vnmask3x13);

    const __m128i vq31prod0x13 = _mm_srli_epi64(_mm_add_epi64(vprod0x13, vrounding), 31);
    const __m128i vq31prod1x13 = _mm_srli_epi64(_mm_add_epi64(vprod1x13, vrounding), 31);
    const __m128i vq31prod2x13 = _mm_srli_epi64(_mm_add_epi64(vprod2x13, vrounding), 31);
    const __m128i vq31prod3x13 = _mm_srli_epi64(_mm_add_epi64(vprod3x13, vrounding), 31);

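    // Interleave the even-lane (0, 2) and odd-lane (1, 3) results back into
    // 0123 order: _mm_shuffle_ps yields the 0213 arrangement, and the
    // following epi32 shuffle restores the natural lane order.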
    const __m128i vq31prod0x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod0x02), _mm_castsi128_ps(vq31prod0x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod1x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod1x02), _mm_castsi128_ps(vq31prod1x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod2x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod2x02), _mm_castsi128_ps(vq31prod2x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod3x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod3x02), _mm_castsi128_ps(vq31prod3x13), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vq31prod0x0123 = _mm_shuffle_epi32(vq31prod0x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod1x0123 = _mm_shuffle_epi32(vq31prod1x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod2x0123 = _mm_shuffle_epi32(vq31prod2x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod3x0123 = _mm_shuffle_epi32(vq31prod3x0213, _MM_SHUFFLE(3, 1, 2, 0));

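    // Rounding division by 2^shift: compute the low `shift` remainder bits
    // (decremented by 1 in negative lanes via the comparison mask), take the
    // arithmetic right shift, then add 1 wherever the remainder exceeds the
    // rounding threshold (subtracting the all-ones comparison result).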
    const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
    const __m128i vrem0x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod0x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0x0123));
    const __m128i vrem1x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod1x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod1x0123));
    const __m128i vrem2x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod2x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod2x0123));
    const __m128i vrem3x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod3x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod3x0123));

    const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
    const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
    vacc0x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod0x0123, vshift), _mm_cmpgt_epi32(vrem0x0123, vremainder_threshold));
    vacc1x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod1x0123, vshift), _mm_cmpgt_epi32(vrem1x0123, vremainder_threshold));
    vacc2x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod2x0123, vshift), _mm_cmpgt_epi32(vrem2x0123, vremainder_threshold));
    vacc3x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod3x0123, vshift), _mm_cmpgt_epi32(vrem3x0123, vremainder_threshold));

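    // Convert to int8: pack pairs of rows to int16 with signed saturation,
    // add the output zero point (saturating), clamp to the requested
    // [output_min, output_max] range while still in int16, then pack all 4
    // rows into one vector of 16 int8 values (row 0 in the lowest 32 bits).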
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
    vacc01x0123 = _mm_min_epi16(_mm_max_epi16(vacc01x0123, voutput_min), voutput_max);
    vacc23x0123 = _mm_min_epi16(_mm_max_epi16(vacc23x0123, voutput_min), voutput_max);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

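    // Store: each row's 4 output bytes occupy one 32-bit lane of vout, and
    // rows are written highest-first. With a full tile the indirection
    // pointer `a` is rewound by ks for the next column block; otherwise the
    // tail path stores 2 bytes and/or 1 byte per row and finishes the tile.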
    if (nc >= 4) {
      *((uint32_t*) c3) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3)));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      *((uint32_t*) c2) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      *((uint32_t*) c1) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c3) = (uint16_t) _mm_extract_epi16(vout, 6);
        c3 += 2;
        *((uint16_t*) c2) = (uint16_t) _mm_extract_epi16(vout, 4);
        c2 += 2;
        *((uint16_t*) c1) = (uint16_t) _mm_extract_epi16(vout, 2);
        c1 += 2;
        *((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout, 0);
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *((int8_t*) c3) = (int8_t) _mm_extract_epi16(vout, 6);
        *((int8_t*) c2) = (int8_t) _mm_extract_epi16(vout, 4);
        *((int8_t*) c1) = (int8_t) _mm_extract_epi16(vout, 2);
        *((int8_t*) c0) = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}