// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include <xnnpack.h>


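// Helper for testing XNNPACK's 2D Argmax Pooling operator in NHWC layout with
// F32 data. Parameters are configured through the fluent setters below;
// TestF32() exercises a single create/setup/run cycle, and TestSetupF32()
// additionally sets the created operator up again with a second set of shapes
// and runs it once more.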
class ArgmaxPoolingOperatorTester {
 public:
  inline ArgmaxPoolingOperatorTester& padding(uint32_t padding) {
    this->padding_top_ = padding;
    this->padding_right_ = padding;
    this->padding_bottom_ = padding;
    this->padding_left_ = padding;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& padding_height(uint32_t padding_height) {
    this->padding_top_ = padding_height;
    this->padding_bottom_ = padding_height;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& padding_width(uint32_t padding_width) {
    this->padding_right_ = padding_width;
    this->padding_left_ = padding_width;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& padding_top(uint32_t padding_top) {
    this->padding_top_ = padding_top;
    return *this;
  }

  inline uint32_t padding_top() const {
    return this->padding_top_;
  }

  inline ArgmaxPoolingOperatorTester& padding_right(uint32_t padding_right) {
    this->padding_right_ = padding_right;
    return *this;
  }

  inline uint32_t padding_right() const {
    return this->padding_right_;
  }

  inline ArgmaxPoolingOperatorTester& padding_bottom(uint32_t padding_bottom) {
    this->padding_bottom_ = padding_bottom;
    return *this;
  }

  inline uint32_t padding_bottom() const {
    return this->padding_bottom_;
  }

  inline ArgmaxPoolingOperatorTester& padding_left(uint32_t padding_left) {
    this->padding_left_ = padding_left;
    return *this;
  }

  inline uint32_t padding_left() const {
    return this->padding_left_;
  }

  inline ArgmaxPoolingOperatorTester& input_size(size_t input_height, size_t input_width) {
    assert(input_height >= 1);
    assert(input_width >= 1);
    this->input_height_ = input_height;
    this->input_width_ = input_width;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& input_height(size_t input_height) {
    assert(input_height >= 1);
    this->input_height_ = input_height;
    return *this;
  }

  inline size_t input_height() const {
    return this->input_height_;
  }

  inline ArgmaxPoolingOperatorTester& input_width(size_t input_width) {
    assert(input_width >= 1);
    this->input_width_ = input_width;
    return *this;
  }

  inline size_t input_width() const {
    return this->input_width_;
  }

  inline ArgmaxPoolingOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline ArgmaxPoolingOperatorTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline ArgmaxPoolingOperatorTester& pooling_size(uint32_t pooling_size) {
    assert(pooling_size >= 1);
    this->pooling_height_ = pooling_size;
    this->pooling_width_ = pooling_size;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& pooling_size(uint32_t pooling_height, uint32_t pooling_width) {
    assert(pooling_height >= 1);
    assert(pooling_width >= 1);
    this->pooling_height_ = pooling_height;
    this->pooling_width_ = pooling_width;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& pooling_height(uint32_t pooling_height) {
    assert(pooling_height >= 1);
    this->pooling_height_ = pooling_height;
    return *this;
  }

  inline uint32_t pooling_height() const {
    return this->pooling_height_;
  }

  inline ArgmaxPoolingOperatorTester& pooling_width(uint32_t pooling_width) {
    assert(pooling_width >= 1);
    this->pooling_width_ = pooling_width;
    return *this;
  }

  inline uint32_t pooling_width() const {
    return this->pooling_width_;
  }

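  // The output size formulas below assume non-overlapping pooling: the pooling
  // size doubles as the stride (xnn_create_argmax_pooling2d_nhwc_f32 takes no
  // stride arguments), and any partial window at the bottom or right edge is
  // dropped by the flooring division.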
  inline size_t output_height() const {
    const size_t padded_input_height = padding_top() + input_height() + padding_bottom();
    return padded_input_height / pooling_height();
  }

  inline size_t output_width() const {
    const size_t padded_input_width = padding_left() + input_width() + padding_right();
    return padded_input_width / pooling_width();
  }

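  // Pixel strides default to the channel count when left unset (0); when set
  // explicitly, they must be at least channels().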
  inline ArgmaxPoolingOperatorTester& input_pixel_stride(size_t input_pixel_stride) {
    assert(input_pixel_stride != 0);
    this->input_pixel_stride_ = input_pixel_stride;
    return *this;
  }

  inline size_t input_pixel_stride() const {
    if (this->input_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->input_pixel_stride_ >= channels());
      return this->input_pixel_stride_;
    }
  }

  inline ArgmaxPoolingOperatorTester& output_pixel_stride(size_t output_pixel_stride) {
    assert(output_pixel_stride != 0);
    this->output_pixel_stride_ = output_pixel_stride;
    return *this;
  }

  inline size_t output_pixel_stride() const {
    if (this->output_pixel_stride_ == 0) {
      return channels();
    } else {
      assert(this->output_pixel_stride_ >= channels());
      return this->output_pixel_stride_;
    }
  }

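  // The next_* dimensions describe the second run performed by TestSetupF32();
  // when left unset (0), they default to the corresponding first-run values.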
  inline ArgmaxPoolingOperatorTester& next_input_size(uint32_t next_input_height, uint32_t next_input_width) {
    assert(next_input_height >= 1);
    assert(next_input_width >= 1);
    this->next_input_height_ = next_input_height;
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline ArgmaxPoolingOperatorTester& next_input_height(uint32_t next_input_height) {
    assert(next_input_height >= 1);
    this->next_input_height_ = next_input_height;
    return *this;
  }

  inline uint32_t next_input_height() const {
    if (this->next_input_height_ == 0) {
      return input_height();
    } else {
      return this->next_input_height_;
    }
  }

  inline ArgmaxPoolingOperatorTester& next_input_width(uint32_t next_input_width) {
    assert(next_input_width >= 1);
    this->next_input_width_ = next_input_width;
    return *this;
  }

  inline uint32_t next_input_width() const {
    if (this->next_input_width_ == 0) {
      return input_width();
    } else {
      return this->next_input_width_;
    }
  }

  inline size_t next_output_height() const {
    const size_t padded_next_input_height = padding_top() + next_input_height() + padding_bottom();
    return padded_next_input_height / pooling_height();
  }

  inline size_t next_output_width() const {
    const size_t padded_next_input_width = padding_left() + next_input_width() + padding_right();
    return padded_next_input_width / pooling_width();
  }

  inline ArgmaxPoolingOperatorTester& next_batch_size(size_t next_batch_size) {
    assert(next_batch_size >= 1);
    this->next_batch_size_ = next_batch_size;
    return *this;
  }

  inline size_t next_batch_size() const {
    if (this->next_batch_size_ == 0) {
      return batch_size();
    } else {
      return this->next_batch_size_;
    }
  }

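  // qmin/qmax control how tightly the output is clamped: the tests map them
  // onto the [min, max] range of the unclamped reference output to derive
  // output_min and output_max.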
  inline ArgmaxPoolingOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline ArgmaxPoolingOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline ArgmaxPoolingOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

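  // Runs the full create/setup/run/verify cycle on the F32 operator and checks
  // both the pooled values and the argmax indices against a scalar reference.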
  void TestF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

    std::vector<float> input((batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output((batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels());
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    std::vector<uint32_t> index(batch_size() * output_height() * output_width() * channels());
    std::vector<uint32_t> index_ref(batch_size() * output_height() * output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), nanf(""));

      // Compute reference results, without clamping.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              const size_t iy_top_left = std::max<size_t>(oy * pooling_height(), padding_top()) - padding_top();
              const size_t ix_top_left = std::max<size_t>(ox * pooling_width(), padding_left()) - padding_left();
              float max_value =
                input[((i * input_height() + iy_top_left) * input_width() + ix_top_left) * input_pixel_stride() + c];
              // The in-window index is encoded as px * pooling_height() + py,
              // i.e. column-major within the pooling window.
              uint32_t max_index = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                // Padded positions underflow to huge size_t values and are
                // rejected by the bounds check below, so padding never
                // contributes to the maximum.
                const size_t iy = oy * pooling_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * pooling_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    const float value = input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c];
                    if (value > max_value) {
                      max_value = value;
                      max_index = uint32_t(px * pooling_height() + py);
                    }
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = max_value;
              index_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = max_index;
            }
          }
        }
      }

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, run, and destroy Argmax Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t argmax_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_argmax_pooling2d_nhwc_f32(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          output_min, output_max,
          0, &argmax_pooling_op));
      ASSERT_NE(nullptr, argmax_pooling_op);

      // Smart pointer to automatically delete argmax_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_argmax_pooling_op(argmax_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_argmax_pooling2d_nhwc_f32(
          argmax_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(), index.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(argmax_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_max) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_GE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_min) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c]) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(index_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  index[((i * output_height() + y) * output_width() + x) * channels() + c]) <<
                "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

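  // Like TestF32(), but after verifying the first run it sets the same
  // operator up again with the next_* shapes, runs it a second time, and
  // verifies that run as well.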
  void TestSetupF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 1.0f), rng);

    std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + std::max(
      (batch_size() * input_height() * input_width() - 1) * input_pixel_stride() + channels(),
      (next_batch_size() * next_input_height() * next_input_width() - 1) * input_pixel_stride() + channels()));
    std::vector<float> output(std::max(
      (batch_size() * output_height() * output_width() - 1) * output_pixel_stride() + channels(),
      (next_batch_size() * next_output_height() * next_output_width() - 1) * output_pixel_stride() + channels()));
    std::vector<uint32_t> index(std::max(
      batch_size() * output_height() * output_width() * channels(),
      next_batch_size() * next_output_height() * next_output_width() * channels()));
    std::vector<float> output_ref(batch_size() * output_height() * output_width() * channels());
    std::vector<float> next_output_ref(next_batch_size() * next_output_height() * next_output_width() * channels());
    std::vector<uint32_t> index_ref(batch_size() * output_height() * output_width() * channels());
    std::vector<uint32_t> next_index_ref(next_batch_size() * next_output_height() * next_output_width() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), nanf(""));

      // Compute reference results, without clamping.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t oy = 0; oy < output_height(); oy++) {
          for (size_t ox = 0; ox < output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              const size_t iy_top_left = std::max<size_t>(oy * pooling_height(), padding_top()) - padding_top();
              const size_t ix_top_left = std::max<size_t>(ox * pooling_width(), padding_left()) - padding_left();
              float max_value =
                input[((i * input_height() + iy_top_left) * input_width() + ix_top_left) * input_pixel_stride() + c];
              uint32_t max_index = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * pooling_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * pooling_width() + px - padding_left();
                  if (ix < input_width() && iy < input_height()) {
                    const float value = input[((i * input_height() + iy) * input_width() + ix) * input_pixel_stride() + c];
                    if (value > max_value) {
                      max_value = value;
                      max_index = uint32_t(px * pooling_height() + py);
                    }
                  }
                }
              }
              output_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = max_value;
              index_ref[((i * output_height() + oy) * output_width() + ox) * channels() + c] = max_index;
            }
          }
        }
      }

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, and run Argmax Pooling operator once.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t argmax_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_argmax_pooling2d_nhwc_f32(
          padding_top(), padding_right(), padding_bottom(), padding_left(),
          pooling_height(), pooling_width(),
          channels(), input_pixel_stride(), output_pixel_stride(),
          output_min, output_max,
          0, &argmax_pooling_op));
      ASSERT_NE(nullptr, argmax_pooling_op);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_argmax_pooling2d_nhwc_f32(
          argmax_pooling_op,
          batch_size(), input_height(), input_width(),
          input.data(), output.data(), index.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(argmax_pooling_op, nullptr /* thread pool */));

      // Verify results of the first run.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t y = 0; y < output_height(); y++) {
          for (size_t x = 0; x < output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_max)
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_GE(output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c], output_min)
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(
                  output_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  output[((i * output_height() + y) * output_width() + x) * output_pixel_stride() + c])
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(
                  index_ref[((i * output_height() + y) * output_width() + x) * channels() + c],
                  index[((i * output_height() + y) * output_width() + x) * channels() + c])
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }

      // Re-generate data for the second run.
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results for the second run, including clamping.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t oy = 0; oy < next_output_height(); oy++) {
          for (size_t ox = 0; ox < next_output_width(); ox++) {
            for (size_t c = 0; c < channels(); c++) {
              const size_t iy_top_left = std::max<size_t>(oy * pooling_height(), padding_top()) - padding_top();
              const size_t ix_top_left = std::max<size_t>(ox * pooling_width(), padding_left()) - padding_left();
              float max_value =
                input[((i * next_input_height() + iy_top_left) * next_input_width() + ix_top_left) * input_pixel_stride() + c];
              uint32_t max_index = 0;
              for (size_t py = 0; py < pooling_height(); py++) {
                const size_t iy = oy * pooling_height() + py - padding_top();
                for (size_t px = 0; px < pooling_width(); px++) {
                  const size_t ix = ox * pooling_width() + px - padding_left();
                  if (ix < next_input_width() && iy < next_input_height()) {
                    const float value = input[((i * next_input_height() + iy) * next_input_width() + ix) * input_pixel_stride() + c];
                    if (value > max_value) {
                      max_value = value;
                      max_index = uint32_t(px * pooling_height() + py);
                    }
                  }
                }
              }
              max_value = std::min(max_value, output_max);
              max_value = std::max(max_value, output_min);
              next_output_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] = max_value;
              next_index_ref[((i * next_output_height() + oy) * next_output_width() + ox) * channels() + c] = max_index;
            }
          }
        }
      }

      // Setup and run Argmax Pooling operator the second time, and destroy the operator.
      ASSERT_EQ(xnn_status_success,
        xnn_setup_argmax_pooling2d_nhwc_f32(
          argmax_pooling_op,
          next_batch_size(), next_input_height(), next_input_width(),
          input.data(), output.data(), index.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(argmax_pooling_op, nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_delete_operator(argmax_pooling_op));
      argmax_pooling_op = nullptr;

      // Verify results of the second run.
      for (size_t i = 0; i < next_batch_size(); i++) {
        for (size_t y = 0; y < next_output_height(); y++) {
          for (size_t x = 0; x < next_output_width(); x++) {
            for (size_t c = 0; c < channels(); c++) {
              ASSERT_LE(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c], output_max)
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_GE(output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c], output_min)
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(
                  next_output_ref[((i * next_output_height() + y) * next_output_width() + x) * channels() + c],
                  output[((i * next_output_height() + y) * next_output_width() + x) * output_pixel_stride() + c])
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
              ASSERT_EQ(
                  next_index_ref[((i * next_output_height() + y) * next_output_width() + x) * channels() + c],
                  index[((i * next_output_height() + y) * next_output_width() + x) * channels() + c])
                << "in batch index " << i << ", pixel (" << y << ", " << x << "), channel " << c;
            }
          }
        }
      }
    }
  }

 private:
  uint32_t padding_top_{0};
  uint32_t padding_right_{0};
  uint32_t padding_bottom_{0};
  uint32_t padding_left_{0};
  size_t input_height_{1};
  size_t input_width_{1};
  size_t channels_{1};
  size_t batch_size_{1};
  size_t input_pixel_stride_{0};
  size_t output_pixel_stride_{0};
  uint32_t pooling_height_{1};
  uint32_t pooling_width_{1};
  size_t next_input_height_{0};
  size_t next_input_width_{0};
  size_t next_batch_size_{0};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{1};
};
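
// Example usage (illustrative sketch, not part of this header: the suite and
// test names below are hypothetical, and such a test body would live in a test
// translation unit rather than in this header):
//
//   TEST(ARGMAX_POOLING_NHWC_F32, small_pool_with_padding) {
//     ArgmaxPoolingOperatorTester()
//       .batch_size(3)
//       .input_size(13, 14)
//       .padding(1)
//       .pooling_size(2, 3)
//       .channels(19)
//       .TestF32();
//   }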