// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <vector>

#include <xnnpack.h>

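// Tester for the XNNPACK Clamp operator. The fluent setters below configure the
// test case (channels, strides, batch size, clamping range, iteration count);
// TestU8() and TestF32() then create, set up, and run the corresponding operator
// and verify its output against a naive reference implementation.
//
// Typical usage:
//   ClampOperatorTester()
//     .channels(channels)
//     .qmin(qmin)
//     .qmax(qmax)
//     .TestU8();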
class ClampOperatorTester {
 public:
  inline ClampOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  inline ClampOperatorTester& input_stride(size_t input_stride) {
    assert(input_stride != 0);
    this->input_stride_ = input_stride;
    return *this;
  }

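  // When the input stride is not set explicitly, rows are assumed to be densely
  // packed, i.e. the stride defaults to the number of channels.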
  inline size_t input_stride() const {
    if (this->input_stride_ == 0) {
      return this->channels_;
    } else {
      assert(this->input_stride_ >= this->channels_);
      return this->input_stride_;
    }
  }

  inline ClampOperatorTester& output_stride(size_t output_stride) {
    assert(output_stride != 0);
    this->output_stride_ = output_stride;
    return *this;
  }

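  // As with the input stride, an unset output stride defaults to the number of
  // channels (densely packed rows).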
  inline size_t output_stride() const {
    if (this->output_stride_ == 0) {
      return this->channels_;
    } else {
      assert(this->output_stride_ >= this->channels_);
      return this->output_stride_;
    }
  }

  inline ClampOperatorTester& batch_size(size_t batch_size) {
    assert(batch_size != 0);
    this->batch_size_ = batch_size;
    return *this;
  }

  inline size_t batch_size() const {
    return this->batch_size_;
  }

  inline ClampOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  inline ClampOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  inline ClampOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

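  // Runs the 8-bit (NC, U8) clamp operator on random inputs and checks each
  // output element against the reference results and the [qmin, qmax] range.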
  void TestU8() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    // std::uniform_int_distribution is not specified for 8-bit types, so draw
    // 32-bit values in the [0, 255] range and let them narrow on assignment.
    auto u8rng = std::bind(
      std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

    std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) +
      (batch_size() - 1) * input_stride() + channels());
    std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels());
    std::vector<uint8_t> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), 0xA5);

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          const uint8_t x = input[i * input_stride() + c];
          const uint8_t y = std::min(std::max(x, qmin()), qmax());
          output_ref[i * channels() + c] = y;
        }
      }

      // Create, setup, run, and destroy Clamp operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t clamp_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_clamp_nc_u8(
          channels(), input_stride(), output_stride(),
          qmin(), qmax(),
          0, &clamp_op));
      ASSERT_NE(nullptr, clamp_op);

      // Smart pointer to automatically delete clamp_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_clamp_op(clamp_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_clamp_nc_u8(
          clamp_op,
          batch_size(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(clamp_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(uint32_t(output[i * output_stride() + c]), uint32_t(qmax()))
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels();
          ASSERT_GE(uint32_t(output[i * output_stride() + c]), uint32_t(qmin()))
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels();
          ASSERT_EQ(uint32_t(output_ref[i * channels() + c]), uint32_t(output[i * output_stride() + c]))
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels()
            << ", qmin = " << uint32_t(qmin()) << ", qmax = " << uint32_t(qmax());
        }
      }
    }
  }

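  // Runs the single-precision (NC, F32) clamp operator on random inputs and checks
  // each output element against the reference results and the [qmin, qmax] range.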
  void TestF32() const {
    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto f32rng = std::bind(std::uniform_real_distribution<float>(0.0f, 255.0f), rng);

    std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) +
      (batch_size() - 1) * input_stride() + channels());
    std::vector<float> output((batch_size() - 1) * output_stride() + channels());
    std::vector<float> output_ref(batch_size() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(f32rng));
      std::fill(output.begin(), output.end(), std::nanf(""));

      // Compute reference results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          const float x = input[i * input_stride() + c];
          const float y = std::min(std::max(x, float(qmin())), float(qmax()));
          output_ref[i * channels() + c] = y;
        }
      }

      // Create, setup, run, and destroy Clamp operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t clamp_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_clamp_nc_f32(
          channels(), input_stride(), output_stride(),
          float(qmin()), float(qmax()),
          0, &clamp_op));
      ASSERT_NE(nullptr, clamp_op);

      // Smart pointer to automatically delete clamp_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_clamp_op(clamp_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_clamp_nc_f32(
          clamp_op,
          batch_size(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(clamp_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < batch_size(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_LE(output[i * output_stride() + c], float(qmax()))
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels();
          ASSERT_GE(output[i * output_stride() + c], float(qmin()))
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels();
          ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c])
            << "at position " << i << ", batch size = " << batch_size() << ", channels = " << channels()
            << ", qmin = " << uint32_t(qmin()) << ", qmax = " << uint32_t(qmax());
        }
      }
    }
  }

 private:
  size_t batch_size_{1};
  size_t channels_{1};
  size_t input_stride_{0};
  size_t output_stride_{0};
  uint8_t qmin_{5};
  uint8_t qmax_{250};
  size_t iterations_{15};
};