// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/igemm.h>


void xnn_qs8_igemm_minmax_ukernel_3x4c8__ssse3_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

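  // Output row pointers. Rows past mr alias the previous row: those rows are
  // still computed, but their stores land on memory that is written anyway.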
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

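  // Outer loop over blocks of 4 output channels. The packed weights w begin
  // each block with 4 int32 bias values that seed the accumulators of all rows.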
  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

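    // Walk the indirection buffer: each iteration consumes MR = 3 row pointers
    // and accumulates one kc-deep slice. A pointer equal to `zero` selects the
    // padding row and must not be adjusted by a_offset.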
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

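      // Inner loop: 8 int8 inputs per row per iteration (the c8 channel block).
      // SSSE3 has no pmovsxbw, so bytes are sign-extended to int16 by unpacking
      // them with a compare-against-zero sign mask.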
      size_t k = 0;
      while (k < kc) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_unpacklo_epi8(va0, _mm_cmpgt_epi8(_mm_setzero_si128(), va0));
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_unpacklo_epi8(va1, _mm_cmpgt_epi8(_mm_setzero_si128(), va1));
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_unpacklo_epi8(va2, _mm_cmpgt_epi8(_mm_setzero_si128(), va2));
        a2 += 8;

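        // ld128: one 16-byte load covers the 8-element weight blocks of two
        // output channels; low/high unpacks sign-extend each block, and
        // _mm_madd_epi16 accumulates 8-term dot products into 4 int32 lanes.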
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
        const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((uintptr_t) w + 16));
        const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
        const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
        const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((uintptr_t) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

123
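    // Each vaccMxN holds 4 partial sums for row M, channel N. Two rounds of
    // pairwise horizontal adds collapse them into one int32 per (row, channel).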
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

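    // Requantize with a rounding Q31 fixed-point multiply. _mm_mul_epu32 only
    // multiplies the even 32-bit lanes, so lanes 1 and 3 are first shuffled into
    // even positions; absolute values are multiplied, and the sign is restored
    // from the negation masks via two's complement: (x ^ m) - m.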
    const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
    const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

    const __m128i vnmask0x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0x0123);
    const __m128i vnmask1x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc1x0123);
    const __m128i vnmask2x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc2x0123);

    const __m128i vabsacc0x0123 = _mm_abs_epi32(vacc0x0123);
    const __m128i vabsacc1x0123 = _mm_abs_epi32(vacc1x0123);
    const __m128i vabsacc2x0123 = _mm_abs_epi32(vacc2x0123);

    const __m128i vabsacc0x1133 = _mm_shuffle_epi32(vabsacc0x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc1x1133 = _mm_shuffle_epi32(vabsacc1x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vabsacc2x1133 = _mm_shuffle_epi32(vabsacc2x0123, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);
    const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);
    const __m128i vabsprod2x02 = _mm_mul_epu32(vabsacc2x0123, vmultiplier);

    const __m128i vnmask0x02 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask1x02 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask2x02 = _mm_shuffle_epi32(vnmask2x0123, _MM_SHUFFLE(2, 2, 0, 0));

    const __m128i vprod0x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x02, vnmask0x02), vnmask0x02);
    const __m128i vprod1x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x02, vnmask1x02), vnmask1x02);
    const __m128i vprod2x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod2x02, vnmask2x02), vnmask2x02);

    const __m128i vq31prod0x02 = _mm_srli_epi64(_mm_add_epi64(vprod0x02, vrounding), 31);
    const __m128i vq31prod1x02 = _mm_srli_epi64(_mm_add_epi64(vprod1x02, vrounding), 31);
    const __m128i vq31prod2x02 = _mm_srli_epi64(_mm_add_epi64(vprod2x02, vrounding), 31);

    const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1133, vmultiplier);
    const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1133, vmultiplier);
    const __m128i vabsprod2x13 = _mm_mul_epu32(vabsacc2x1133, vmultiplier);

    const __m128i vnmask0x13 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask1x13 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask2x13 = _mm_shuffle_epi32(vnmask2x0123, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vprod0x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x13, vnmask0x13), vnmask0x13);
    const __m128i vprod1x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x13, vnmask1x13), vnmask1x13);
    const __m128i vprod2x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod2x13, vnmask2x13), vnmask2x13);

    const __m128i vq31prod0x13 = _mm_srli_epi64(_mm_add_epi64(vprod0x13, vrounding), 31);
    const __m128i vq31prod1x13 = _mm_srli_epi64(_mm_add_epi64(vprod1x13, vrounding), 31);
    const __m128i vq31prod2x13 = _mm_srli_epi64(_mm_add_epi64(vprod2x13, vrounding), 31);

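    // Interleave the even-lane (0, 2) and odd-lane (1, 3) products back into
    // channel order 0, 1, 2, 3.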
    const __m128i vq31prod0x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod0x02), _mm_castsi128_ps(vq31prod0x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod1x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod1x02), _mm_castsi128_ps(vq31prod1x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod2x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod2x02), _mm_castsi128_ps(vq31prod2x13), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vq31prod0x0123 = _mm_shuffle_epi32(vq31prod0x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod1x0123 = _mm_shuffle_epi32(vq31prod1x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod2x0123 = _mm_shuffle_epi32(vq31prod2x0213, _MM_SHUFFLE(3, 1, 2, 0));

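    // Arithmetic right shift with rounding to nearest: keep the shifted-out
    // bits (decremented by 1 for negative inputs) and add 1 to the shifted
    // value, by subtracting the all-ones compare mask, when that remainder
    // exceeds the threshold.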
    const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
    const __m128i vrem0x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod0x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0x0123));
    const __m128i vrem1x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod1x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod1x0123));
    const __m128i vrem2x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod2x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod2x0123));

    const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
    const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
    vacc0x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod0x0123, vshift), _mm_cmpgt_epi32(vrem0x0123, vremainder_threshold));
    vacc1x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod1x0123, vshift), _mm_cmpgt_epi32(vrem1x0123, vremainder_threshold));
    vacc2x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod2x0123, vshift), _mm_cmpgt_epi32(vrem2x0123, vremainder_threshold));

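    // Convert to int8: pack rows 0-1 (and row 2 with itself) to int16 with
    // signed saturation, add the output zero point, clamp to
    // [output_min, output_max], then pack everything to int8.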
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
    vacc01x0123 = _mm_min_epi16(_mm_max_epi16(vacc01x0123, voutput_min), voutput_max);
    vacc22x0123 = _mm_min_epi16(_mm_max_epi16(vacc22x0123, voutput_min), voutput_max);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

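    // Full tile: store 4 bytes per row, advance each row pointer by cn_stride,
    // and rewind the indirection buffer by ks for the next channel block.
    // Partial tile: store the remaining 2 and/or 1 channels and finish.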
    if (nc >= 4) {
      *((uint32_t*) c2) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      *((uint32_t*) c1) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c2) = (uint16_t) _mm_extract_epi16(vout, 4);
        c2 += 2;
        *((uint16_t*) c1) = (uint16_t) _mm_extract_epi16(vout, 2);
        c1 += 2;
        *((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout, 0);
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *((int8_t*) c2) = (int8_t) _mm_extract_epi16(vout, 4);
        *((int8_t*) c1) = (int8_t) _mm_extract_epi16(vout, 2);
        *((int8_t*) c0) = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}