// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include <xnnpack.h>


class GlobalAveragePoolingOperatorTester {
 public:
  inline GlobalAveragePoolingOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline GlobalAveragePoolingOperatorTester& width(size_t width) {
    assert(width != 0);
    this->width_ = width;
    return *this;
  }

  inline size_t width() const {
    return this->width_;
  }

  inline GlobalAveragePoolingOperatorTester& input_stride(size_t input_stride) {
    assert(input_stride != 0);
    this->input_stride_ = input_stride;
    return *this;
  }

  inline size_t input_stride() const {
    if (this->input_stride_ == 0) {
      return channels();
    } else {
      assert(this->input_stride_ >= channels());
      return this->input_stride_;
    }
  }

  inline GlobalAveragePoolingOperatorTester& output_stride(size_t output_stride) {
    assert(output_stride != 0);
    this->output_stride_ = output_stride;
    return *this;
  }

  inline size_t output_stride() const {
    if (this->output_stride_ == 0) {
      return channels();
    } else {
      assert(this->output_stride_ >= channels());
      return this->output_stride_;
    }
  }

  inline GlobalAveragePoolingOperatorTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline GlobalAveragePoolingOperatorTester& input_scale(float input_scale) {
    assert(input_scale > 0.0f);
    assert(std::isnormal(input_scale));
    this->input_scale_ = input_scale;
    return *this;
  }

  inline float input_scale() const {
    return this->input_scale_;
  }

  inline GlobalAveragePoolingOperatorTester& input_zero_point(uint8_t input_zero_point) {
    this->input_zero_point_ = input_zero_point;
    return *this;
  }

  inline uint8_t input_zero_point() const {
    return this->input_zero_point_;
  }

  inline GlobalAveragePoolingOperatorTester& output_scale(float output_scale) {
    assert(output_scale > 0.0f);
    assert(std::isnormal(output_scale));
    this->output_scale_ = output_scale;
    return *this;
  }

  inline float output_scale() const {
    return this->output_scale_;
  }

  inline GlobalAveragePoolingOperatorTester& output_zero_point(uint8_t output_zero_point) {
    this->output_zero_point_ = output_zero_point;
    return *this;
  }

  inline uint8_t output_zero_point() const {
    return this->output_zero_point_;
  }

  inline GlobalAveragePoolingOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline GlobalAveragePoolingOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline GlobalAveragePoolingOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void TestNWCxQ8() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

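    // Buffer sizing note: only the last pixel needs exactly channels() elements, so the input
    // holds (batch * width - 1) * input_stride + channels values, plus XNN_EXTRA_BYTES of
    // padding for micro-kernels that may read slightly past the last element.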
    std::vector<uint8_t> input((batch_size() * width() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(uint8_t));
    std::vector<uint8_t> output(batch_size() * output_stride());
    std::vector<float> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results.
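      // Quantized global average pooling dequantizes, averages over the width, and requantizes:
      //   out[i][j] = clamp(zp_out + (in_scale / (width * out_scale)) * sum_k (in[i][k][j] - zp_in), qmin, qmax)
      // The averaging divisor is folded into a single double-precision scale below.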
      const double scale = double(input_scale()) / (double(width()) * double(output_scale()));
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t j = 0; j < channels(); j++) {
          double acc = 0.0;
          for (size_t k = 0; k < width(); k++) {
            acc += double(int32_t(input[(i * width() + k) * input_stride() + j]) - int32_t(input_zero_point()));
          }
          output_ref[i * channels() + j] = float(acc * scale + double(output_zero_point()));
          output_ref[i * channels() + j] = std::min<float>(output_ref[i * channels() + j], float(qmax()));
          output_ref[i * channels() + j] = std::max<float>(output_ref[i * channels() + j], float(qmin()));
        }
      }

      // Create, setup, run, and destroy Global Average Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t global_average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_global_average_pooling_nwc_q8(
          channels(), input_stride(), output_stride(),
          input_zero_point(), input_scale(),
          output_zero_point(), output_scale(),
          qmin(), qmax(),
          0, &global_average_pooling_op));
      ASSERT_NE(nullptr, global_average_pooling_op);

      // Smart pointer to automatically delete global_average_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_global_average_pooling_op(global_average_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_global_average_pooling_nwc_q8(
          global_average_pooling_op,
          batch_size(), width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(global_average_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(uint32_t(output[i * output_stride() + c]), uint32_t(qmax()));
          ASSERT_GE(uint32_t(output[i * output_stride() + c]), uint32_t(qmin()));
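          // The 0.80 tolerance allows for the rounding of the quantized output (up to 0.5 from
          // the real-valued reference), with the remainder presumably budgeted for the
          // operator's internal fixed-point arithmetic.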
          ASSERT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.80f) <<
            "in batch index " << i << ", channel " << c;
        }
      }
    }
  }

  void TestNWCxF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input((batch_size() * width() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output(batch_size() * output_stride());
    std::vector<float> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results, without clamping.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t j = 0; j < channels(); j++) {
          float acc = 0.0f;
          for (size_t k = 0; k < width(); k++) {
            acc += input[(i * width() + k) * input_stride() + j];
          }
          output_ref[i * channels() + j] = acc / float(width());
        }
      }

      // Compute clamping parameters.
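      // qmin()/qmax() are given on a [0, 255] scale; map them onto the observed range of the
      // unclamped reference outputs so that non-default values actually exercise the operator's
      // output clamping. The defaults (qmin = 0, qmax = 255) leave the outputs unclamped.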
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, run, and destroy Global Average Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t global_average_pooling_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_global_average_pooling_nwc_f32(
          channels(), input_stride(), output_stride(),
          output_min, output_max,
          0, &global_average_pooling_op));
      ASSERT_NE(nullptr, global_average_pooling_op);

      // Smart pointer to automatically delete global_average_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_global_average_pooling_op(global_average_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_global_average_pooling_nwc_f32(
          global_average_pooling_op,
          batch_size(), width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(global_average_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(output[i * output_stride() + c], output_max);
          ASSERT_GE(output[i * output_stride() + c], output_min);
          ASSERT_NEAR(output[i * output_stride() + c], output_ref[i * channels() + c], std::abs(output_ref[i * channels() + c]) * 1.0e-6f) <<
            "in batch index " << i << ", channel " << c;
        }
      }
    }
  }

  void TestNCWxF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(), rng);

    std::vector<float> input(batch_size() * channels() * width() + XNN_EXTRA_BYTES / sizeof(float));
    std::vector<float> output(batch_size() * channels());
    std::vector<float> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results, without clamping.
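      // In NCW layout, each channel's elements are contiguous along the width dimension, so
      // element (batch i, channel j, position k) lives at index (i * channels() + j) * width() + k.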
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t j = 0; j < channels(); j++) {
          float acc = 0.0f;
          for (size_t k = 0; k < width(); k++) {
            acc += input[(i * channels() + j) * width() + k];
          }
          output_ref[i * channels() + j] = acc / float(width());
        }
      }

      // Compute clamping parameters.
      const float accumulated_min = *std::min_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_max = *std::max_element(output_ref.cbegin(), output_ref.cend());
      const float accumulated_range = accumulated_max - accumulated_min;
      const float output_min = accumulated_range == 0.0f ?
        -std::numeric_limits<float>::infinity() :
        accumulated_min + accumulated_range / 255.0f * float(qmin());
      const float output_max = accumulated_range == 0.0f ?
        +std::numeric_limits<float>::infinity() :
        accumulated_max - accumulated_range / 255.0f * float(255 - qmax());

      // Clamp reference results.
      for (float& value : output_ref) {
        value = std::max(std::min(value, output_max), output_min);
      }

      // Create, setup, run, and destroy Global Average Pooling operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t global_average_pooling_op = nullptr;

      xnn_status status = xnn_create_global_average_pooling_ncw_f32(
        channels(), output_min, output_max,
        0, &global_average_pooling_op);
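      // Not every configuration is supported by the NCW implementation; skip (rather than fail)
      // when operator creation reports an unsupported parameter.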
      if (status == xnn_status_unsupported_parameter) {
        GTEST_SKIP();
      }
      ASSERT_EQ(xnn_status_success, status);

      // Smart pointer to automatically delete global_average_pooling_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_global_average_pooling_op(global_average_pooling_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_global_average_pooling_ncw_f32(
          global_average_pooling_op,
          batch_size(), width(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(global_average_pooling_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(output[i * channels() + c], output_max);
          ASSERT_GE(output[i * channels() + c], output_min);
          ASSERT_NEAR(output[i * channels() + c], output_ref[i * channels() + c], std::abs(output_ref[i * channels() + c]) * 1.0e-5f) <<
            "in batch index " << i << ", channel " << c;
        }
      }
    }
  }

 private:
  size_t batch_size_{1};
  size_t width_{1};
  size_t channels_{1};
  size_t input_stride_{0};
  size_t output_stride_{0};
  float input_scale_{1.0f};
  float output_scale_{1.0f};
  uint8_t input_zero_point_{121};
  uint8_t output_zero_point_{133};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{1};
};
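
// Minimal usage sketch (hypothetical, for illustration only; the real test cases live in the
// corresponding test .cc files): configure the builder-style setters, then call one of the
// Test*() methods, e.g.
//
//   TEST(GLOBAL_AVERAGE_POOLING_NWC_F32, example) {  // hypothetical test suite/name
//     GlobalAveragePoolingOperatorTester()
//       .batch_size(3)
//       .width(17)
//       .channels(29)
//       .input_stride(31)
//       .TestNWCxF32();
//   }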