// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


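// 3x3 depthwise convolution in CHW layout with stride 2 and one pixel of
// top/left padding. Each pass computes 3 output rows x 4 output columns;
// the weights are pre-broadcast ("loadsplat") into one vector per kernel tap.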
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_3x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

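  // The even/odd lane masks zero out columns past the end of the row in the
  // remainder path below; vmin/vmax are the output clamping bounds.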
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);

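  // weights[0] is the shared bias; weights[1..9] are the 3x3 kernel taps in
  // row-major order. Each tap is broadcast to all four lanes.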
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v64x2_load_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

  const v128_t vzero = wasm_f32x4_splat(0.0f);

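  // input_width is in bytes. input_decrement rewinds a row pointer from
  // wherever the main loop left it back to the start of its row, and
  // output_width is the output row stride in bytes: one output per two
  // input columns, rounded up.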
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

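  // Rows i0..i6 feed three output rows: output row n reads input rows
  // 2*n-1..2*n+1. With padding_top == 1, i0 is backed up one row and then
  // redirected to the zero row, so i1 lands on the first real input row.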
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);
  float* o2 = (float*) ((uintptr_t) o1 + output_width);

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
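    // If this band extends past the bottom of the padded image, read the
    // out-of-range rows from the zero row and alias the corresponding output
    // pointers; the valid row is stored last below, so its values win.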
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i5 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i6 = zero;
    }

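    // vi*x1357 carry the odd-indexed columns of the previous 8-column block.
    // Starting them at zero provides the implicit left-padding column.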
    v128_t vi0x1357 = vzero;
    v128_t vi1x1357 = vzero;
    v128_t vi2x1357 = vzero;
    v128_t vi3x1357 = vzero;
    v128_t vi4x1357 = vzero;
    v128_t vi5x1357 = vzero;
    v128_t vi6x1357 = vzero;

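    // Main loop: consume 8 input columns from each of the 7 rows and produce
    // 4 output columns in each of the 3 output rows.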
    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      i5 += 8;
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      i6 += 8;

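      // Deinterleave each pair of loads into even columns (the convolution
      // centers, given one column of left padding) and odd columns.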
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);

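      // Middle kernel column: output row n accumulates input rows 2n, 2n+1,
      // and 2n+2 against taps k01, k11, and k21.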
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));

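      // Form the left-neighbor vectors by shifting in the last odd column
      // carried over from the previous block, then save this block's odd
      // columns for the next iteration.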
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      vi5x1357 = vi5x9BDF;
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      vi6x1357 = vi6x9BDF;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));


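      // Clamp the accumulators to the [vmin, vmax] activation range.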
      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);

      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);

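      // Same deinterleave as the main loop, but lanes past the end of the
      // row are zeroed by the even/odd masks so they cannot contribute.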
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, vk21));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, vk22));


      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);

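      // One to seven input columns remain, yielding (columns + 1)/2 outputs.
      // Bumping w by one column's worth of bytes lets the bit tests below
      // decode that count into stores of 4, 2, and 1 elements.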
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

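    // Advance to the next band of 3 output rows: i6 already points into the
    // row that becomes the new i0, so rewind it by input_decrement to the
    // start of that row; output rows continue from o2.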
    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);

    o0 = o2;
    o1 = (float*) ((uintptr_t) o0 + output_width);
    o2 = (float*) ((uintptr_t) o1 + output_width);

    output_height = doz(output_height, 3);
    padded_input_height = doz(padded_input_height, 6);
  } while (output_height != 0);
}