// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

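// Template for XNNPACK QS8 (signed 8-bit) IGEMM minmax micro-kernels with an
// MRx4 output tile and 8-element channel grouping (the "4c8" in the kernel
// name). The SSE parameter selects the ISA level (2=SSE2, 3=SSSE3, 4=SSE4.1,
// 5=XOP) and LD128 selects 128-bit instead of 64-bit weight loads.
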
$SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h", 5: "ammintrin.h"}[SSE]
$assert MR <= 4
#include <assert.h>

$if SSE == 5:
  #ifdef __GNUC__
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <${SSE_HEADER}>
  #endif
$else:
  #include <${SSE_HEADER}>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


$ISA = {2: "sse2", 3: "ssse3", 4: "sse41", 5: "xop"}[SSE]
void xnn_qs8_igemm_minmax_ukernel_${MR}x4c8__${ISA}_${"ld128" if LD128 else "ld64"}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

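  // The kernel consumes A and B in groups of 8 int8 elements (the c8 packing),
  // so round kc up to a multiple of 8.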
  kc = round_up_po2(kc, 8);
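  // Set up ${MR} output row pointers. When mr < ${MR}, excess rows alias the
  // previous row so their stores stay in bounds (and are simply redundant).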
  int8_t* c0 = c;
  $for M in range(1, MR):
    int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

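  // Loop over output columns, 4 per iteration. The packed weights start each
  // 4-column group with 4 int32 bias values that seed the accumulators.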
  do {
    $for N in range(4):
      __m128i vacc0x${N} = _mm_cvtsi32_si128((int) ((const int32_t*) w)[${N}]);
    $for M in range(1, MR):
      $for N in range(4):
        __m128i vacc${M}x${N} = vacc0x${N};
    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

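    // ks bytes of the indirection buffer (a multiple of ${MR} pointers) are
    // consumed per tile. A pointer equal to `zero` selects the zero-padding
    // row and must not be adjusted by a_offset.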
    size_t p = ks;
    do {
      $for M in range(MR):
        const int8_t* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

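      // Reduction over kc: each step loads 8 int8 values per A row,
      // sign-extends them to int16, and accumulates into 4 int32 lanes per
      // row/column pair via _mm_madd_epi16 (or XOP's fused _mm_maddd_epi16).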
      size_t k = 0;
      while (k < kc) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if SSE >= 4:
            const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
          $else:
            const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, _mm_cmpgt_epi8(_mm_setzero_si128(), va${M}));
          a${M} += 8;

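        // LD128: load 16 weight bytes (two columns) per _mm_load_si128 and
        // sign-extend with unpacklo/unpackhi; LD64: load 8 bytes per column.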
        $if LD128:
          $for N in range(0, 4, 2):
            $if N == 0:
              const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) w);
            $else:
              const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) ((uintptr_t) w + ${N * 8}));
            const __m128i vsb${N}${N+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${N}${N+1});
            const __m128i vxb${N} = _mm_unpacklo_epi8(vb${N}${N+1}, vsb${N}${N+1});
            const __m128i vxb${N+1} = _mm_unpackhi_epi8(vb${N}${N+1}, vsb${N}${N+1});

            $for M in range(MR):
              $if SSE == 5:
                vacc${M}x${N} = _mm_maddd_epi16(vxa${M}, vxb${N}, vacc${M}x${N});
                vacc${M}x${N+1} = _mm_maddd_epi16(vxa${M}, vxb${N+1}, vacc${M}x${N+1});
              $else:
                vacc${M}x${N} = _mm_add_epi32(vacc${M}x${N}, _mm_madd_epi16(vxa${M}, vxb${N}));
                vacc${M}x${N+1} = _mm_add_epi32(vacc${M}x${N+1}, _mm_madd_epi16(vxa${M}, vxb${N+1}));
        $else:
          $for N in range(4):
            $if N == 0:
              const __m128i vb${N} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${N} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${N * 8}));
            $if SSE >= 4:
              const __m128i vxb${N} = _mm_cvtepi8_epi16(vb${N});
            $else:
              const __m128i vxb${N} = _mm_unpacklo_epi8(vb${N}, _mm_cmpgt_epi8(_mm_setzero_si128(), vb${N}));

            $for M in range(MR):
              $if SSE == 5:
                vacc${M}x${N} = _mm_maddd_epi16(vxa${M}, vxb${N}, vacc${M}x${N});
              $else:
                vacc${M}x${N} = _mm_add_epi32(vacc${M}x${N}, _mm_madd_epi16(vxa${M}, vxb${N}));

        w = (const void*) ((uintptr_t) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

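    // Reduce the four per-column accumulators of each row into a single
    // vector: SSSE3 and above use _mm_hadd_epi32; SSE2 emulates the
    // horizontal add with unpack/add pairs.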
    $if SSE >= 3:
      $for M in range(MR):
        const __m128i vacc${M}x01 = _mm_hadd_epi32(vacc${M}x0, vacc${M}x1);
        const __m128i vacc${M}x23 = _mm_hadd_epi32(vacc${M}x2, vacc${M}x3);

      $for M in range(MR):
        __m128i vacc${M}x0123 = _mm_hadd_epi32(vacc${M}x01, vacc${M}x23);
    $else:
      $for M in range(MR):
        const __m128i vacc${M}x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x0, vacc${M}x2), _mm_unpackhi_epi32(vacc${M}x0, vacc${M}x2));
        const __m128i vacc${M}x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x1, vacc${M}x3), _mm_unpackhi_epi32(vacc${M}x1, vacc${M}x3));

      $for M in range(MR):
        __m128i vacc${M}x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc${M}x02, vacc${M}x13), _mm_unpackhi_epi32(vacc${M}x02, vacc${M}x13));

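    // Requantization: scale the int32 accumulators by a Q31 fixed-point
    // multiplier (rounding-doubling high multiply) and then apply a rounding
    // arithmetic right shift.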
    const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
    const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

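    // SSE4.1+ multiplies signed 32-bit lanes directly with _mm_mul_epi32.
    // SSE2/SSSE3 only have the unsigned _mm_mul_epu32, so they multiply
    // absolute values and restore the sign via XOR/subtract with a sign mask.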
    $if SSE >= 4:
      $for M in range(MR):
        const __m128i vacc${M}x1133 = _mm_shuffle_epi32(vacc${M}x0123, _MM_SHUFFLE(3, 3, 1, 1));

      $for M in range(MR):
        const __m128i vprod${M}x02 = _mm_add_epi64(_mm_mul_epi32(vacc${M}x0123, vmultiplier), vrounding);

      $for M in range(MR):
        const __m128i vprod${M}x13 = _mm_add_epi64(_mm_mul_epi32(vacc${M}x1133, vmultiplier), vrounding);

      $for M in range(MR):
        const __m128i vq31prod${M}x02 = _mm_srli_epi64(vprod${M}x02, 31);
        const __m128i vq31prod${M}x13 = _mm_add_epi64(vprod${M}x13, vprod${M}x13);

      $for M in range(MR):
        const __m128i vq31prod${M}x0123 = _mm_blend_epi16(vq31prod${M}x02, vq31prod${M}x13, 0xCC);
    $else:
      $for M in range(MR):
        const __m128i vnmask${M}x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${M}x0123);

      $for M in range(MR):
        $if SSE >= 3:
          const __m128i vabsacc${M}x0123 = _mm_abs_epi32(vacc${M}x0123);
        $else:
          const __m128i vabsacc${M}x0123 = _mm_sub_epi32(_mm_xor_si128(vacc${M}x0123, vnmask${M}x0123), vnmask${M}x0123);

      $for M in range(MR):
        const __m128i vabsacc${M}x1133 = _mm_shuffle_epi32(vabsacc${M}x0123, _MM_SHUFFLE(3, 3, 1, 1));

      $for M in range(MR):
        const __m128i vabsprod${M}x02 = _mm_mul_epu32(vabsacc${M}x0123, vmultiplier);

      $for M in range(MR):
        const __m128i vnmask${M}x02 = _mm_shuffle_epi32(vnmask${M}x0123, _MM_SHUFFLE(2, 2, 0, 0));

      $for M in range(MR):
        const __m128i vprod${M}x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod${M}x02, vnmask${M}x02), vnmask${M}x02);

      $for M in range(MR):
        const __m128i vq31prod${M}x02 = _mm_srli_epi64(_mm_add_epi64(vprod${M}x02, vrounding), 31);

      $for M in range(MR):
        const __m128i vabsprod${M}x13 = _mm_mul_epu32(vabsacc${M}x1133, vmultiplier);

      $for M in range(MR):
        const __m128i vnmask${M}x13 = _mm_shuffle_epi32(vnmask${M}x0123, _MM_SHUFFLE(3, 3, 1, 1));

      $for M in range(MR):
        const __m128i vprod${M}x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod${M}x13, vnmask${M}x13), vnmask${M}x13);

      $for M in range(MR):
        const __m128i vq31prod${M}x13 = _mm_srli_epi64(_mm_add_epi64(vprod${M}x13, vrounding), 31);

      $for M in range(MR):
        const __m128i vq31prod${M}x0213 = _mm_castps_si128(_mm_shuffle_ps(
            _mm_castsi128_ps(vq31prod${M}x02), _mm_castsi128_ps(vq31prod${M}x13), _MM_SHUFFLE(2, 0, 2, 0)));

      $for M in range(MR):
        const __m128i vq31prod${M}x0123 = _mm_shuffle_epi32(vq31prod${M}x0213, _MM_SHUFFLE(3, 1, 2, 0));

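    // Round the shift to nearest: compute the remainder of the arithmetic
    // shift (biased by 1 for negative inputs) and increment the quotient
    // wherever the remainder exceeds the threshold.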
    const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
    $for M in range(MR):
      const __m128i vrem${M}x0123 =
        _mm_add_epi32(_mm_and_si128(vq31prod${M}x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${M}x0123));

    const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
    const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
    $for M in range(MR):
      vacc${M}x0123 =
        _mm_sub_epi32(_mm_sra_epi32(vq31prod${M}x0123, vshift), _mm_cmpgt_epi32(vrem${M}x0123, vremainder_threshold));

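    // Narrow int32 -> int16 with signed saturation and add the output zero
    // point; saturating adds keep the result within int16 range.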
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
    $for M in range(0, MR, 2):
      vacc${M}${min(M+1, MR-1)}x0123 = _mm_min_epi16(_mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min), voutput_max);

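    // Pack the clamped int16 results into a single int8 vector: row M ends up
    // in byte lanes 4*M..4*M+3.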
    $if MR > 2:
      __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
    $else:
      __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

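    // Store the tile: 4 columns per row in the main path, otherwise the 2-
    // and/or 1-column remainder. Rows are written in reverse order so that,
    // when mr < ${MR}, aliased row pointers end up holding the lower row's data.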
    if (nc >= 4) {
      $for M in reversed(range(1, MR)):
        $if SSE >= 4:
          *((uint32_t*) c${M}) = (uint32_t) _mm_extract_epi32(vout, ${M});
        $else:
          *((uint32_t*) c${M}) = (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(${M}, ${M}, ${M}, ${M})));
        c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

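      // Rewind the indirection buffer to the start of this tile: the same A
      // pointers are reused for the next 4-column block.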
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in reversed(range(MR)):
          *((uint16_t*) c${M}) = (uint16_t) _mm_extract_epi16(vout, ${M * 2});
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE >= 4:
          $for M in reversed(range(MR)):
            *((int8_t*) c${M}) = (int8_t) _mm_extract_epi8(vout, ${M * 4});
        $else:
          $for M in reversed(range(1, MR)):
            *((int8_t*) c${M}) = (int8_t) _mm_extract_epi16(vout, ${M * 2});
          *((int8_t*) c0) = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}