// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <gtest/gtest.h>

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <random>
#include <vector>

#include <xnnpack.h>


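// Tester for the XNNPACK N-dimensional constant-pad operators (x8/x16/x32):
// pads an input tensor with a constant value along each dimension and
// compares the operator's output against a naive reference implementation.
//
// A usage sketch (the shape, padding, and iteration values are arbitrary
// examples):
//   ConstantPadOperatorTester()
//     .input_shape({3, 5, 7})
//     .pre_paddings({1, 0, 2})
//     .post_paddings({0, 4, 1})
//     .iterations(5)
//     .TestX32();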
class ConstantPadOperatorTester {
 public:
  inline ConstantPadOperatorTester& input_shape(std::initializer_list<size_t> input_shape) {
    assert(input_shape.size() <= XNN_MAX_TENSOR_DIMS);
    input_shape_ = std::vector<size_t>(input_shape);
    return *this;
  }

  inline const std::vector<size_t>& input_shape() const {
    return input_shape_;
  }

  inline size_t input_dim(size_t i) const {
    return i < input_shape_.size() ? input_shape_[i] : 1;
  }

  inline size_t num_dims() const {
    return input_shape_.size();
  }

  inline size_t num_input_elements() const {
    return std::accumulate(
      input_shape_.cbegin(), input_shape_.cend(), size_t(1), std::multiplies<size_t>());
  }

  inline ConstantPadOperatorTester& pre_paddings(std::initializer_list<size_t> pre_paddings) {
    assert(pre_paddings.size() <= XNN_MAX_TENSOR_DIMS);
    pre_paddings_ = std::vector<size_t>(pre_paddings);
    return *this;
  }

  inline const std::vector<size_t>& pre_paddings() const {
    return pre_paddings_;
  }

  inline size_t pre_padding(size_t i) const {
    return i < pre_paddings_.size() ? pre_paddings_[i] : 0;
  }

  inline size_t num_pre_paddings() const {
    return pre_paddings_.size();
  }

  inline ConstantPadOperatorTester& post_paddings(std::initializer_list<size_t> post_paddings) {
    assert(post_paddings.size() <= XNN_MAX_TENSOR_DIMS);
    post_paddings_ = std::vector<size_t>(post_paddings);
    return *this;
  }

  inline const std::vector<size_t>& post_paddings() const {
    return post_paddings_;
  }

  inline size_t post_padding(size_t i) const {
    return i < post_paddings_.size() ? post_paddings_[i] : 0;
  }

  inline size_t num_post_paddings() const {
    return post_paddings_.size();
  }

  inline size_t output_dim(size_t i) const {
    return pre_padding(i) + input_dim(i) + post_padding(i);
  }

  inline size_t num_output_elements() const {
    size_t elements = 1;
    for (size_t i = 0; i < num_dims(); i++) {
      elements *= output_dim(i);
    }
    return elements;
  }

  inline ConstantPadOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  void TestX8() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u8rng = std::bind(std::uniform_int_distribution<uint32_t>(0, std::numeric_limits<uint8_t>::max()), rng);

    // Compute generalized shapes.
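    // Dimensions are right-aligned into the XNN_MAX_TENSOR_DIMS slots; the
    // leading (outermost) slots default to size 1 with zero padding.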
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
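    // Dense row-major strides, accumulated from the innermost dimension out.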
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

    std::vector<uint8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + num_input_elements());
    std::vector<uint8_t> output(num_output_elements());
    std::vector<uint8_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      std::fill(output.begin(), output.end(), UINT8_C(0xAA));
      const uint8_t padding_value = u8rng();

      // Compute reference results.
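      // Fill the whole output with the padding value, then copy each input
      // element into its padded position.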
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x8(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x8(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
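      // Every output element is checked, including the padded border, which
      // must equal padding_value via output_ref.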
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << static_cast<uint32_t>(padding_value);
                }
              }
            }
          }
        }
      }
    }
  }

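  // Same procedure as TestX8, but for 16-bit elements.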
  void TestX16() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u16rng = std::bind(std::uniform_int_distribution<uint16_t>(), rng);

    // Compute generalized shapes.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

    std::vector<uint16_t> input(XNN_EXTRA_BYTES / sizeof(uint16_t) + num_input_elements());
    std::vector<uint16_t> output(num_output_elements());
    std::vector<uint16_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u16rng));
      std::fill(output.begin(), output.end(), UINT16_C(0xDEAD));
      const uint16_t padding_value = u16rng();

      // Compute reference results.
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x16(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x16(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << padding_value;
                }
              }
            }
          }
        }
      }
    }
  }

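  // Same procedure as TestX8, but for 32-bit elements.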
  void TestX32() const {
    ASSERT_EQ(num_dims(), num_pre_paddings());
    ASSERT_EQ(num_dims(), num_post_paddings());

    std::random_device random_device;
    auto rng = std::mt19937(random_device());
    auto u32rng = std::bind(std::uniform_int_distribution<uint32_t>(), rng);

    // Compute generalized shapes.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_dims;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_pre_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_post_paddings;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_dims;
    std::fill(input_dims.begin(), input_dims.end(), 1);
    std::fill(input_pre_paddings.begin(), input_pre_paddings.end(), 0);
    std::fill(input_post_paddings.begin(), input_post_paddings.end(), 0);
    std::fill(output_dims.begin(), output_dims.end(), 1);
    for (size_t i = 0; i < num_dims(); i++) {
      input_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = input_dim(i);
      input_pre_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = pre_padding(i);
      input_post_paddings[XNN_MAX_TENSOR_DIMS - num_dims() + i] = post_padding(i);
      output_dims[XNN_MAX_TENSOR_DIMS - num_dims() + i] = output_dim(i);
    }

    // Compute generalized strides.
    std::array<size_t, XNN_MAX_TENSOR_DIMS> input_strides;
    std::array<size_t, XNN_MAX_TENSOR_DIMS> output_strides;
    size_t input_stride = 1, output_stride = 1;
    for (size_t i = XNN_MAX_TENSOR_DIMS; i != 0; i--) {
      input_strides[i - 1] = input_stride;
      output_strides[i - 1] = output_stride;
      input_stride *= input_dims[i - 1];
      output_stride *= output_dims[i - 1];
    }

    std::vector<uint32_t> input(XNN_EXTRA_BYTES / sizeof(uint32_t) + num_input_elements());
    std::vector<uint32_t> output(num_output_elements());
    std::vector<uint32_t> output_ref(num_output_elements());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u32rng));
      std::fill(output.begin(), output.end(), UINT32_C(0xDEADBEEF));
      const uint32_t padding_value = u32rng();

      // Compute reference results.
      std::fill(output_ref.begin(), output_ref.end(), padding_value);
      for (size_t i = 0; i < input_dims[0]; i++) {
        for (size_t j = 0; j < input_dims[1]; j++) {
          for (size_t k = 0; k < input_dims[2]; k++) {
            for (size_t l = 0; l < input_dims[3]; l++) {
              for (size_t m = 0; m < input_dims[4]; m++) {
                for (size_t n = 0; n < input_dims[5]; n++) {
                  const size_t output_index =
                    (i + input_pre_paddings[0]) * output_strides[0] +
                    (j + input_pre_paddings[1]) * output_strides[1] +
                    (k + input_pre_paddings[2]) * output_strides[2] +
                    (l + input_pre_paddings[3]) * output_strides[3] +
                    (m + input_pre_paddings[4]) * output_strides[4] +
                    (n + input_pre_paddings[5]) * output_strides[5];
                  const size_t input_index =
                    i * input_strides[0] + j * input_strides[1] + k * input_strides[2] +
                    l * input_strides[3] + m * input_strides[4] + n * input_strides[5];
                  output_ref[output_index] = input[input_index];
                }
              }
            }
          }
        }
      }

      // Create, setup, run, and destroy a constant pad operator.
      ASSERT_EQ(xnn_status_success, xnn_initialize(nullptr /* allocator */));
      xnn_operator_t pad_op = nullptr;

      ASSERT_EQ(xnn_status_success,
        xnn_create_constant_pad_nd_x32(
          &padding_value, 0, &pad_op));
      ASSERT_NE(nullptr, pad_op);

      // Smart pointer to automatically delete pad_op.
      std::unique_ptr<xnn_operator, decltype(&xnn_delete_operator)> auto_pad_op(pad_op, xnn_delete_operator);

      ASSERT_EQ(xnn_status_success,
        xnn_setup_constant_pad_nd_x32(
          pad_op,
          num_dims(),
          input_shape().data(), pre_paddings().data(), post_paddings().data(),
          input.data(), output.data(),
          nullptr /* thread pool */));

      ASSERT_EQ(xnn_status_success,
        xnn_run_operator(pad_op, nullptr /* thread pool */));

      // Verify results.
      for (size_t i = 0; i < output_dims[0]; i++) {
        for (size_t j = 0; j < output_dims[1]; j++) {
          for (size_t k = 0; k < output_dims[2]; k++) {
            for (size_t l = 0; l < output_dims[3]; l++) {
              for (size_t m = 0; m < output_dims[4]; m++) {
                for (size_t n = 0; n < output_dims[5]; n++) {
                  const size_t index =
                    i * output_strides[0] + j * output_strides[1] + k * output_strides[2] +
                    l * output_strides[3] + m * output_strides[4] + n * output_strides[5];
                  ASSERT_EQ(output[index], output_ref[index])
                    << "(i, j, k, l, m, n) = ("
                    << i << ", " << j << ", " << k << ", " << l << ", " << m << ", " << n << ")"
                    << ", padding value = " << padding_value;
                }
              }
            }
          }
        }
      }
    }
  }

 private:
  std::vector<size_t> input_shape_;
  std::vector<size_t> pre_paddings_;
  std::vector<size_t> post_paddings_;
  size_t iterations_{3};
};