// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

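  // Load the even/odd column masks (used to zero out-of-bounds lanes in the
  // final partial block of each row) and the output clamping bounds.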
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

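  // weights[0] is the bias, shared by all output pixels; weights[1..9] are
  // the 3x3 filter taps. Each value is splatted across all four SIMD lanes.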
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

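  // Each main-loop iteration reads 8 input pixels per row and writes 4 output
  // pixels per row (stride 2). input_decrement rewinds the row pointers by
  // exactly the distance the main loop advanced them; output_width is the
  // output row pitch in bytes.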
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

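  // Each iteration produces two output rows from five input rows (i0-i4).
  // With padding_top == 1, the implicit top row is read from the caller's
  // zero buffer instead of from memory.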
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);

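  // The kernel pads the bottom with one implicit zero row (plus padding_top
  // rows on top), so it sees padded_input_height rows and emits
  // (padded_input_height - 1) / 2 output rows in total.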
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
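    // Rows that fall below the bottom of the (padded) input are redirected to
    // the zero buffer; when the second output row does not exist, o1 aliases
    // o0, so its stores are harmlessly overwritten by o0's.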
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }

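    // vi*x1357 holds the odd-indexed pixels of the previous block of each
    // row; its last lane supplies the left neighbor (column 7) of the current
    // block. At the start of a row that neighbor is the implicit left
    // padding, i.e. zero.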
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;

    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
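      // Main loop: load 8 consecutive pixels from each of the five rows and
      // deinterleave them into even (8ACE) and odd (9BDF) phases; the even
      // phase holds the centers of this block's 4 output pixels.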
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;

      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

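      // The 9 taps are accumulated into two partial sums per output row
      // (vo*p0 and vo*p1, the "acc2" in the kernel name) to shorten the
      // floating-point dependency chain; they are merged once at the end.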
      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));

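      // Shift the previous block's last odd pixel in front of the current odd
      // pixels to form the left-neighbor column (7BDF), then save the current
      // odd pixels for the next block.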
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

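      // Clamp to [min, max] with compare + bitselect. The _x86_ variant of
      // this kernel presumably avoids f32x4.min/f32x4.max here because those
      // lower to comparatively slow instruction sequences on x86.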
      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);

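      // The full-width loads above may read past the end of the row; the
      // even/odd masks zero those lanes during deinterleaving so they cannot
      // contribute to the output.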
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, vk01);
      v128_t vo1p1 = wasm_f32x4_mul(vi2x8ACE, vk01);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);

      v128_t vo0 = wasm_v128_bitselect(vmin, vo0p0, wasm_f32x4_lt(vo0p0, vmin));
      v128_t vo1 = wasm_v128_bitselect(vmin, vo1p0, wasm_f32x4_lt(vo1p0, vmin));
      vo0 = wasm_v128_bitselect(vo0, vmax, wasm_f32x4_le(vo0, vmax));
      vo1 = wasm_v128_bitselect(vo1, vmax, wasm_f32x4_le(vo1, vmax));

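      // w holds 1-7 remaining input pixels, which produce ceil(w/2) outputs.
      // Adding one float to w makes the bit tests below select the right
      // combination of 4-, 2- and 1-element stores.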
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

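    // Advance to the next pair of output rows: the new i0 starts where i4
    // started, i.e. four input rows (2 output rows x stride 2) below the
    // previous i0.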
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + output_width);

    output_height = doz(output_height, 2);
    padded_input_height = doz(padded_input_height, 4);
  } while (output_height != 0);
}