// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


void xnn_qs8_igemm_minmax_ukernel_1x4c8__wasmsimd_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (1 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;

  const v128_t vzero = wasm_f64x2_splat(0.0);
  do {
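    // Initialize the four per-output-channel accumulators from the int32 bias
    // values at the start of the packed weights w. Loading through a float lane
    // replace is a bit-reinterpretation trick: each 32-bit bias lands in lane 0
    // of its own vector, with the remaining lanes zeroed.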
    v128_t vacc0x0 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[0]);
    v128_t vacc0x1 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[1]);
    v128_t vacc0x2 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[2]);
    v128_t vacc0x3 = wasm_f32x4_replace_lane(vzero, 0, ((const float*) w)[3]);
    w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));

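    // Walk the ks entries of the indirection buffer. An entry equal to the
    // `zero` pointer reads from the zero buffer and is not adjusted by a_offset.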
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

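      // Process 8 K-elements per iteration: sign-extend 8 int8 inputs to int16,
      // multiply against each of the 4 packed weight columns, and widen the
      // int16 products into the int32 accumulators.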
      size_t k = 0;
      while (k < kc) {
        const v128_t vxa0 = wasm_i16x8_load_8x8(a0);
        a0 += 8;

        const v128_t vxb0 = wasm_i16x8_load_8x8(w);

        const v128_t vprod0x0 = wasm_i16x8_mul(vxa0, vxb0);
        vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_low_i16x8(vprod0x0));
        vacc0x0 = wasm_i32x4_add(vacc0x0, wasm_i32x4_widen_high_i16x8(vprod0x0));
        const v128_t vxb1 = wasm_i16x8_load_8x8((const void*) ((uintptr_t) w + 8 * sizeof(int8_t)));

        const v128_t vprod0x1 = wasm_i16x8_mul(vxa0, vxb1);
        vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_low_i16x8(vprod0x1));
        vacc0x1 = wasm_i32x4_add(vacc0x1, wasm_i32x4_widen_high_i16x8(vprod0x1));
        const v128_t vxb2 = wasm_i16x8_load_8x8((const void*) ((uintptr_t) w + 16 * sizeof(int8_t)));

        const v128_t vprod0x2 = wasm_i16x8_mul(vxa0, vxb2);
        vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_low_i16x8(vprod0x2));
        vacc0x2 = wasm_i32x4_add(vacc0x2, wasm_i32x4_widen_high_i16x8(vprod0x2));
        const v128_t vxb3 = wasm_i16x8_load_8x8((const void*) ((uintptr_t) w + 24 * sizeof(int8_t)));

        const v128_t vprod0x3 = wasm_i16x8_mul(vxa0, vxb3);
        vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_low_i16x8(vprod0x3));
        vacc0x3 = wasm_i32x4_add(vacc0x3, wasm_i32x4_widen_high_i16x8(vprod0x3));

        w = (const void*) ((uintptr_t) w + 32 * sizeof(int8_t));
        k += 8 * sizeof(int8_t);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

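    // Horizontally reduce the four accumulator vectors: two rounds of
    // interleave-and-add leave the 4 per-channel sums in a single vector.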
    const v128_t vacc0x02 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x0, vacc0x2, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x0, vacc0x2, 2, 6, 3, 7));
    const v128_t vacc0x13 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x1, vacc0x3, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x1, vacc0x3, 2, 6, 3, 7));

    v128_t vacc0x0123 = wasm_i32x4_add(wasm_v32x4_shuffle(vacc0x02, vacc0x13, 0, 4, 1, 5), wasm_v32x4_shuffle(vacc0x02, vacc0x13, 2, 6, 3, 7));

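    // Requantize: zip each 32-bit accumulator with its sign mask to form
    // sign-extended 64-bit lanes, multiply by the Q31 multiplier with rounding,
    // and keep the high 32 bits of each 64-bit product.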
    const v128_t vsign0x0123 = wasm_i32x4_lt(vacc0x0123, vzero);

    const v128_t vacc0x01 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 0, 4, 1, 5);

    const v128_t vmultiplier = wasm_v128_load(params->wasmsimd.multiplier);
    const v128_t vrounding = wasm_v128_load(params->wasmsimd.rounding);
    const v128_t vprod0x01 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x01, vmultiplier), vrounding);
    const v128_t vacc0x23 = wasm_v32x4_shuffle(vacc0x0123, vsign0x0123, 2, 6, 3, 7);

    const v128_t vprod0x23 = wasm_i64x2_add(wasm_i64x2_mul(vacc0x23, vmultiplier), vrounding);

    const v128_t vq31prod0x0123 = wasm_v32x4_shuffle(vprod0x01, vprod0x23, 1, 3, 5, 7);

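    // Arithmetic shift right with round-to-nearest: the masked remainder
    // (adjusted by -1 for negative products) is compared against the threshold
    // to decide whether the shifted value is bumped up by one.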
    const v128_t vremainder_mask = wasm_v128_load(params->wasmsimd.remainder_mask);
    const v128_t vrem0x0123 = wasm_i32x4_add(wasm_v128_and(vq31prod0x0123, vremainder_mask), wasm_i32x4_lt(vq31prod0x0123, vzero));

    const v128_t vthreshold = wasm_v128_load(params->wasmsimd.remainder_threshold);
    const int32_t vshift = params->wasmsimd.shift;
    vacc0x0123 = wasm_i32x4_sub(wasm_i32x4_shr(vq31prod0x0123, vshift), wasm_i32x4_gt(vrem0x0123, vthreshold));

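    // Add the output zero point, saturate-narrow int32 -> int16 -> int8, and
    // clamp the result to the [output_min, output_max] range.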
    const v128_t voutput_zero_point = wasm_v128_load(params->wasmsimd.output_zero_point);
    v128_t vacc00x0123 = wasm_i16x8_add_saturate(wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123), voutput_zero_point);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);

    const v128_t voutput_min = wasm_v128_load(params->wasmsimd.output_min);
    vout = wasm_i8x16_max(vout, voutput_min);

    const v128_t voutput_max = wasm_v128_load(params->wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

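    // Store: when a full column tile remains, write all 4 output bytes at once
    // (reinterpreted through a 32-bit float lane) and rewind the indirection
    // buffer for the next tile; otherwise write the 2- and/or 1-byte tail.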
    if (nc >= 4) {
      *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) wasm_i16x8_extract_lane(vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) wasm_i8x16_extract_lane(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}