// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

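  // K is consumed 8 bytes per step (the "c8" layout), so round kc up to a
  // multiple of 8; the packed weights cover the rounded-up K, and activation
  // rows are expected to be padded so the trailing loads stay readable.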
  kc = round_up_po2(kc, 8);
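  // Set up one output row pointer per row of the tile; when mr < 3 the unused
  // rows alias a lower row, so their stores are harmless duplicate writes.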
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
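    // Seed all 12 accumulators (3 rows x 4 columns) from the 4 packed
    // per-channel bias values; every row starts from the same biases.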
    __m128i vacc0x0 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

    size_t p = ks;
    do {
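      // Pull the next 3 row pointers from the indirection buffer. Pointers
      // equal to the explicit zero buffer must not be offset, so padding
      // rows keep reading zeros.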
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
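        // Load 8 signed 8-bit activations per row and sign-extend them to
        // 16 bits for the 16x16->32 multiply-accumulate below.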
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

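        // "ld128": a single 16-byte load covers two packed weight columns.
        // Sign-extension uses compare-against-zero plus unpack, because
        // _mm_cvtepi8_epi16 only widens the low 8 bytes of a register.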
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
        const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

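        // Each _mm_madd_epi16 yields 4 partial 32-bit sums of adjacent
        // 16-bit products for one (row, column) accumulator.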
        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((uintptr_t) w + 16));
        const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
        const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
        const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((uintptr_t) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

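    // Each vacc{row}x{col} still holds 4 partial sums for one output element;
    // two rounds of pairwise horizontal adds collapse them into one register
    // of 4 column sums per row.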
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

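    // fp32 requantization: convert the 32-bit sums to float, multiply by the
    // quantization scale, and round back to int32 (_mm_cvtps_epi32 rounds to
    // nearest-even under the default MXCSR rounding mode).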
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

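    // Pack to 16 bits with a saturating add of the output zero point, then
    // saturating-pack to 8 bits; row 2 is paired with itself to fill its
    // register before the final pack.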
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
    vout = _mm_min_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_max));

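    // vout now holds rows 0..2 (plus a copy of row 2) as 4 int8 values each.
    // Full-tile path: write 4 outputs per row (rows stored from the bottom
    // up), then rewind the indirection pointer for the next column tile.
    // Remainder path: peel off 2-byte and then 1-byte tails per row.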
    if (nc >= 4) {
      *((uint32_t*) c2) = (uint32_t) _mm_extract_epi32(vout, 2);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      *((uint32_t*) c1) = (uint32_t) _mm_extract_epi32(vout, 1);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c2) = (uint16_t) _mm_extract_epi16(vout, 4);
        c2 += 2;
        *((uint16_t*) c1) = (uint16_t) _mm_extract_epi16(vout, 2);
        c1 += 2;
        *((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout, 0);
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *((int8_t*) c2) = (int8_t) _mm_extract_epi8(vout, 8);
        *((int8_t*) c1) = (int8_t) _mm_extract_epi8(vout, 4);
        *((int8_t*) c0) = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}