/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "utils/GraphUtils.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/runtime/SubTensor.h"
#include "utils/ImageLoader.h"
#include "utils/Utils.h"

#include <iomanip>
#include <limits>

using namespace arm_compute::graph_utils;

namespace
{
std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape,
                                                                                                    arm_compute::DataLayout data_layout)
{
    // Set permutation parameters if needed
    arm_compute::TensorShape      permuted_shape = shape;
    arm_compute::PermutationVector perm;
    // Permute only if num_dimensions greater than 2
    if(shape.num_dimensions() > 2)
    {
        perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);

        arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
        arm_compute::permute(permuted_shape, perm_shape);
    }

    return std::make_pair(permuted_shape, perm);
}
} // namespace

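// Scales FP32 values from [0, 255] down to [-1, 1], the input range typically expected by TensorFlow-trained models.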
void TFPreproccessor::preprocess(ITensor &tensor)
{
    Window window;
    window.use_tensor_dimensions(tensor.info()->tensor_shape());

    execute_window_loop(window, [&](const Coordinates & id)
    {
        const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id));
        float res         = value / 255.f;      // Normalize to [0, 1]
        res               = (res - 0.5f) * 2.f; // Map to [-1, 1]
        *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = res;
    });
}

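// Caffe-style preprocessing: subtracts a per-channel mean from every element. The mean triplet is assumed to be given in RGB order and is swapped when the network expects BGR input.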
CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr)
    : _mean(mean), _bgr(bgr)
{
    if(_bgr)
    {
        std::swap(_mean[0], _mean[2]);
    }
}

void CaffePreproccessor::preprocess(ITensor &tensor)
{
    Window window;
    window.use_tensor_dimensions(tensor.info()->tensor_shape());

    const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);

    execute_window_loop(window, [&](const Coordinates & id)
    {
        const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - _mean[id[channel_idx]];
        *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value;
    });
}

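// Dumps every tensor it receives to a numbered PPM file ("<name><iteration>.ppm"); keeps the graph running until 'maximum' tensors have been written (0 means no limit).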
PPMWriter::PPMWriter(std::string name, unsigned int maximum)
    : _name(std::move(name)), _iterator(0), _maximum(maximum)
{
}

bool PPMWriter::access_tensor(ITensor &tensor)
{
    std::stringstream ss;
    ss << _name << _iterator << ".ppm";

    arm_compute::utils::save_to_ppm(tensor, ss.str());

    _iterator++;
    if(_maximum == 0)
    {
        return true;
    }
    return _iterator < _maximum;
}

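// Accessor that leaves the tensor untouched; it simply keeps the graph iterating until 'maximum' calls have been made (0 means run indefinitely).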
DummyAccessor::DummyAccessor(unsigned int maximum)
    : _iterator(0), _maximum(maximum)
{
}

bool DummyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_UNUSED(tensor);
    bool ret = _maximum == 0 || _iterator < _maximum;
    if(_iterator == _maximum)
    {
        _iterator = 0;
    }
    else
    {
        _iterator++;
    }
    return ret;
}

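// Loads a reference output from a .npy file at construction time and, on each access, reports the percentage of elements in the produced tensor that match it.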
NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream)
    : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
{
    NumPyBinLoader loader(_filename);

    TensorInfo info(shape, 1, data_type);
    _npy_tensor.allocator()->init(info);
    _npy_tensor.allocator()->allocate();

    loader.access_tensor(_npy_tensor);
}

template <typename T>
void NumPyAccessor::access_numpy_tensor(ITensor &tensor)
{
    const int num_elements        = tensor.info()->tensor_shape().total_size();
    int       num_mismatches      = utils::compare_tensor<T>(tensor, _npy_tensor);
    float   percentage_mismatches = static_cast<float>(num_mismatches) / num_elements;

    _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl;
}

bool NumPyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
    ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0));

    switch(tensor.info()->data_type())
    {
        case DataType::F32:
            access_numpy_tensor<float>(tensor);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    return false;
}

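// Fills the input tensor from a single image file exactly once (the loader type is picked by utils::ImageLoaderFactory), applying the optional preprocessor afterwards.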
ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
    : _already_loaded(false), _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
{
}

bool ImageAccessor::access_tensor(ITensor &tensor)
{
    if(!_already_loaded)
    {
        auto image_loader = utils::ImageLoaderFactory::create(_filename);
        ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type");

        // Open image file
        image_loader->open(_filename);

        // Get permuted shape and permutation parameters
        TensorShape permuted_shape = tensor.info()->tensor_shape();
        arm_compute::PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
        }
        ARM_COMPUTE_EXIT_ON_MSG(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
                                "Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].",
                                image_loader->width(), image_loader->height(), permuted_shape.x(), permuted_shape.y());

        // Fill the tensor with the image content (BGR)
        image_loader->fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    _already_loaded = !_already_loaded;
    return _already_loaded;
}

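// Reads an image list file (one image name per line), keeps the entries in the [start, end] range and feeds them to the graph one JPEG at a time.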
ValidationInputAccessor::ValidationInputAccessor(const std::string             &image_list,
                                                 std::string                    images_path,
                                                 std::unique_ptr<IPreprocessor> preprocessor,
                                                 bool                           bgr,
                                                 unsigned int                   start,
                                                 unsigned int                   end,
                                                 std::ostream                  &output_stream)
    : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse image names
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add image to process if within range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;

                linestream >> image_name;
                _images.emplace_back(std::move(image_name));
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}

bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _images.size();
    if(ret)
    {
        utils::JPEGLoader jpeg;

        // Open JPEG file
        std::string image_name = _path + _images[_offset++];
        jpeg.open(image_name);
        _output_stream << "[" << _offset << "/" << _images.size() << "] Validating " << image_name << std::endl;

        // Get permuted shape and permutation parameters
        TensorShape permuted_shape = tensor.info()->tensor_shape();
        arm_compute::PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(),
                                                                            tensor.info()->data_layout());
        }
        ARM_COMPUTE_EXIT_ON_MSG(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
                                "Failed to load image file: dimensions [%d,%d] not correct, expected [%d,%d].",
                                jpeg.width(), jpeg.height(), permuted_shape.x(), permuted_shape.y());

        // Fill the tensor with the JPEG content (BGR)
        jpeg.fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    return ret;
}

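// Reads the expected label of each validation image from the list file and accumulates top-1 / top-5 statistics over the classifier output.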
ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list,
                                                   std::ostream      &output_stream,
                                                   unsigned int       start,
                                                   unsigned int       end)
    : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse the correctly classified label of each image
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add label if within range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;
                int               result;

                linestream >> image_name >> result;
                _results.emplace_back(result);
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}

void ValidationOutputAccessor::reset()
{
    _offset                = 0;
    _positive_samples_top1 = 0;
    _positive_samples_top5 = 0;
}

bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _results.size();
    if(ret)
    {
        // Get results
        std::vector<size_t> tensor_results;
        switch(tensor.info()->data_type())
        {
            case DataType::QASYMM8:
                tensor_results = access_predictions_tensor<uint8_t>(tensor);
                break;
            case DataType::F32:
                tensor_results = access_predictions_tensor<float>(tensor);
                break;
            default:
                ARM_COMPUTE_ERROR("NOT SUPPORTED!");
        }

        // Check if tensor results are within top-n accuracy
        size_t correct_label = _results[_offset++];

        aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
        aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
    }

    // Report top_n accuracy
    if(_offset >= _results.size())
    {
        report_top_n(1, _results.size(), _positive_samples_top1);
        report_top_n(5, _results.size(), _positive_samples_top5);
    }

    return ret;
}

template <typename T>
std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_compute::ITensor &tensor)
{
    // Get the predicted class
    std::vector<size_t> index;

    const auto   output_net  = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
    const size_t num_classes = tensor.info()->dimension(0);

    index.resize(num_classes);

    // Sort results
    std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
    std::sort(std::begin(index), std::end(index),
              [&](size_t a, size_t b)
    {
        return output_net[a] > output_net[b];
    });

    return index;
}

void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
{
    auto is_valid_label = [correct_label](size_t label)
    {
        return label == correct_label;
    };

    if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
    {
        ++positive_samples;
    }
}

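// Prints the accumulated statistics for a given top-N: positive/negative sample counts and the resulting accuracy ratio.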
void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
{
    size_t negative_samples = total_samples - positive_samples;
    float  accuracy         = positive_samples / static_cast<float>(total_samples);

    _output_stream << "---------- Top " << top_n << " accuracy ----------" << std::endl
                   << std::endl;
    _output_stream << "Positive samples : " << positive_samples << std::endl;
    _output_stream << "Negative samples : " << negative_samples << std::endl;
    _output_stream << "Accuracy : " << accuracy << std::endl;
}

TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
    : _labels(), _output_stream(output_stream), _top_n(top_n)
{
    _labels.clear();

    std::ifstream ifs;

    try
    {
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(labels_path, std::ios::in | std::ios::binary);

        for(std::string line; !std::getline(ifs, line).fail();)
        {
            _labels.emplace_back(line);
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Accessing %s: %s", labels_path.c_str(), e.what());
    }
}

template <typename T>
void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor)
{
    // Get the predicted class
    std::vector<T>      classes_prob;
    std::vector<size_t> index;

    const auto   output_net  = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
    const size_t num_classes = tensor.info()->dimension(0);

    classes_prob.resize(num_classes);
    index.resize(num_classes);

    std::copy(output_net, output_net + num_classes, classes_prob.begin());

    // Sort results
    std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
    std::sort(std::begin(index), std::end(index),
              [&](size_t a, size_t b)
    {
        return classes_prob[a] > classes_prob[b];
    });

    _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
                   << std::endl;
    for(size_t i = 0; i < _top_n; ++i)
    {
        _output_stream << std::fixed << std::setprecision(4)
                       << +classes_prob[index.at(i)]
                       << " - [id = " << index.at(i) << "]"
                       << ", " << _labels[index.at(i)] << std::endl;
    }
}

bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32, DataType::QASYMM8);
    ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));

    switch(tensor.info()->data_type())
    {
        case DataType::QASYMM8:
            access_predictions_tensor<uint8_t>(tensor);
            break;
        case DataType::F32:
            access_predictions_tensor<float>(tensor);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    return false;
}

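// Fills tensors with uniformly distributed random values between 'lower' and 'upper'; the same seed is reused on every access so the generated data is reproducible.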
RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
    : _lower(lower), _upper(upper), _seed(seed)
{
}

template <typename T, typename D>
void RandomAccessor::fill(ITensor &tensor, D &&distribution)
{
    std::mt19937 gen(_seed);

    if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr))
    {
        for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size())
        {
            const T value = distribution(gen);
            *reinterpret_cast<T *>(tensor.buffer() + offset) = value;
        }
    }
    else
    {
        // If the tensor has padding, access its elements through an execution window
        Window window;
        window.use_tensor_dimensions(tensor.info()->tensor_shape());

        execute_window_loop(window, [&](const Coordinates & id)
        {
            const T value = distribution(gen);
            *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value;
        });
    }
}

bool RandomAccessor::access_tensor(ITensor &tensor)
{
    switch(tensor.info()->data_type())
    {
        case DataType::U8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>());
            fill<uint8_t>(tensor, distribution_u8);
            break;
        }
        case DataType::S8:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
            fill<int8_t>(tensor, distribution_s8);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(_lower.get<uint16_t>(), _upper.get<uint16_t>());
            fill<uint16_t>(tensor, distribution_u16);
            break;
        }
        case DataType::S16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
            fill<int16_t>(tensor, distribution_s16);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(_lower.get<uint32_t>(), _upper.get<uint32_t>());
            fill<uint32_t>(tensor, distribution_u32);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(_lower.get<int32_t>(), _upper.get<int32_t>());
            fill<int32_t>(tensor, distribution_s32);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(_lower.get<uint64_t>(), _upper.get<uint64_t>());
            fill<uint64_t>(tensor, distribution_u64);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(_lower.get<int64_t>(), _upper.get<int64_t>());
            fill<int64_t>(tensor, distribution_s64);
            break;
        }
        case DataType::F16:
        {
            std::uniform_real_distribution<float> distribution_f16(_lower.get<float>(), _upper.get<float>());
            fill<float>(tensor, distribution_f16);
            break;
        }
        case DataType::F32:
        {
            std::uniform_real_distribution<float> distribution_f32(_lower.get<float>(), _upper.get<float>());
            fill<float>(tensor, distribution_f32);
            break;
        }
        case DataType::F64:
        {
            std::uniform_real_distribution<double> distribution_f64(_lower.get<double>(), _upper.get<double>());
            fill<double>(tensor, distribution_f64);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
    return true;
}

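// Loads a tensor from a NumPy .npy file exactly once; the layout of the data stored in the file can be specified via 'file_layout'.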
NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout)
    : _already_loaded(false), _filename(std::move(filename)), _file_layout(file_layout)
{
}

bool NumPyBinLoader::access_tensor(ITensor &tensor)
{
    if(!_already_loaded)
    {
        utils::NPYLoader loader;
        loader.open(_filename, _file_layout);
        loader.fill_tensor(tensor);
    }

    _already_loaded = !_already_loaded;
    return _already_loaded;
}