//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "Utils.hpp"
#include "Half.hpp"

#include <armnnUtils/Permute.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>
#include <Filesystem.hpp>
#include <log/log.h>

#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <sstream>
#include <cstdio>
#include <time.h>

using namespace android;
using namespace android::hardware;
using namespace android::hidl::memory::V1_0;

namespace armnn_driver
{
const armnn::PermutationVector g_DontPermute{};

namespace
{

void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
                                     void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
{
    assert(inTensorShape.GetNumDimensions() == 4U);

    armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
}

} // anonymous namespace

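// Swizzles the data of a 4D tensor from its Android NN layout into the ArmNN layout described by the
// given permutation mappings, dispatching on the tensor's data type to pick the correct element size.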
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings)
{
    assert(tensor.GetNumDimensions() == 4U);

    armnn::DataType dataType = tensor.GetDataType();
    switch (dataType)
    {
    case armnn::DataType::Float16:
    case armnn::DataType::Float32:
    case armnn::DataType::QAsymmU8:
    case armnn::DataType::QSymmS8:
    case armnn::DataType::QAsymmS8:
        SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
        break;
    default:
        ALOGW("Unknown armnn::DataType for swizzling");
        assert(0);
    }
}

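// Resolves a data location to a raw pointer by indexing into the request's runtime memory pools.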
void* GetMemoryFromPool(V1_0::DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    uint8_t* memPoolBuffer = memPool.getBuffer();

    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}

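// Converts a V1_0 Android NN operand into an equivalent armnn::TensorInfo,
// carrying across its shape, data type and quantization parameters.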
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
{
    using namespace armnn;
    DataType type;

    switch (operand.type)
    {
        case V1_0::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_0::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_0::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        int count = 0;
        std::for_each(operand.dimensions.data(),
                      operand.dimensions.data() + operand.dimensions.size(),
                      [&](const unsigned int val)
                      {
                          if (val == 0)
                          {
                              dimensionsSpecificity[count] = false;
                          }
                          count++;
                      });

        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
        ret = TensorInfo(tensorShape, type);
    }

    ret.SetQuantizationScale(operand.scale);
    ret.SetQuantizationOffset(operand.zeroPoint);

    return ret;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

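// Converts a V1_2 Android NN operand into an equivalent armnn::TensorInfo.
// Unlike the V1_0 overload, this also handles per-channel (per-axis) quantized operand types.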
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;

    DataType type;
    switch (operand.type)
    {
        case V1_2::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_2::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_2::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_2::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        int count = 0;
        std::for_each(operand.dimensions.data(),
                      operand.dimensions.data() + operand.dimensions.size(),
                      [&](const unsigned int val)
                      {
                          if (val == 0)
                          {
                              dimensionsSpecificity[count] = false;
                          }
                          count++;
                      });

        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
        ret = TensorInfo(tensorShape, type);
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }

    return ret;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

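// Converts a V1_3 Android NN operand into an equivalent armnn::TensorInfo.
// In addition to per-channel quantization, this overload maps scalar INT32 operands
// onto a TensorInfo with Scalar dimensionality.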
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar = false;

    DataType type;
    switch (operand.type)
    {
        case V1_3::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_3::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_3::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_3::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case V1_3::OperandType::INT32:
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            bool dimensionsSpecificity[5] = { true, true, true, true, true };
            int count = 0;
            std::for_each(operand.dimensions.data(),
                          operand.dimensions.data() + operand.dimensions.size(),
                          [&](const unsigned int val)
                          {
                              if (val == 0)
                              {
                                  dimensionsSpecificity[count] = false;
                              }
                              count++;
                          });

            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}

#endif

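// Returns a short human-readable summary of an operand: its dimensions followed by its type.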
std::string GetOperandSummary(const V1_0::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

std::string GetOperandSummary(const V1_2::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

std::string GetOperandSummary(const V1_3::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);

namespace
{
template <typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
}

constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
{
    const char* str = "";

    switch (tensor.GetNumDimensions())
    {
        case 4: { str = "(BHWC) "; break; }
        case 3: { str = "(HWC) "; break; }
        case 2: { str = "(HW) "; break; }
        default: { str = ""; break; }
    }

    return str;
}
} // namespace

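// Writes the contents of a tensor as text to <dumpDir>/<requestName>_<tensorName>.dump.
// The dump directory must already exist; tensors with unsupported data types are noted in the
// file rather than dumped element by element.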
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor)
{
    // The dump directory must exist in advance.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");

    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    DumpElementFunction dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();

        const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;

        const unsigned int height = (numDimensions >= 3)
                                    ? tensor.GetShape()[numDimensions - 3]
                                    : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;

        const unsigned int width = (numDimensions >= 3)
                                   ? tensor.GetShape()[numDimensions - 2]
                                   : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;

        const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;

        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
        fileStream << "# Dimensions " << MemoryLayoutString(tensor);
        fileStream << "[" << tensor.GetShape()[0];
        for (unsigned int d = 1; d < numDimensions; d++)
        {
            fileStream << "," << tensor.GetShape()[d];
        }
        fileStream << "]" << std::endl;

        for (unsigned int e = 0, b = 0; b < batch; ++b)
        {
            if (numDimensions >= 4)
            {
                fileStream << "# Batch " << b << std::endl;
            }
            for (unsigned int c = 0; c < channels; c++)
            {
                if (numDimensions >= 3)
                {
                    fileStream << "# Channel " << c << std::endl;
                }
                for (unsigned int h = 0; h < height; h++)
                {
                    for (unsigned int w = 0; w < width; w++, e += channels)
                    {
                        (*dumpElementFunction)(tensor, e, fileStream);
                    }
                    fileStream << std::endl;
                }
                // After printing a full HxW plane, step e back to the first element of the next
                // channel; after the last channel only rewind by (channels - 1) so that e lands on
                // the first element of the next batch.
                e -= channels - 1;
                if (c < channels - 1)
                {
                    e -= ((height * width) - 1) * channels;
                }
            }
            fileStream << std::endl;
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
}

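// Writes the profiler output for the given network to <dumpDir>/<networkId>_profiling.json.
// This is a no-op unless GPU profiling is enabled and a dump directory has been configured.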
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    ARMNN_ASSERT(profiler);

    // Set the name of the output profiling file.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");

521 // Open the ouput file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

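// Serializes the optimized network to a timestamped .dot file in dumpDir and returns the
// resulting file name, or an empty string if no dump directory or timestamp is available.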
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir)
{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fs::path dumpPath = dumpDir;
    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
    fileName = tempFilePath.string();

    ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
    return fileName;
}

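// A tensor is treated as dynamic if its shape is not fully specified at model construction time.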
bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        return true;
    }
    // Account for the usage of the TensorShape empty constructor
    if (tensorInfo.GetNumDimensions() == 0)
    {
        return true;
    }
    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}

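// Dynamic tensors are only supported when the driver is built against the NNAPI 1.3 (Android R) HAL.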
bool AreDynamicTensorsSupported()
{
#if defined(ARMNN_ANDROID_NN_V1_3)
    return true;
#else
    return false;
#endif
}

std::string GetFileTimestamp()
{
    // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files)
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
    }
    else
    {
        ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno));
    }
    return ss.str();
}

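// Renames a previously exported graph .dot file so that its name includes the network id,
// i.e. <dumpDir>/<networkId>_networkgraph.dot.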
void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId)
{
    if (dumpDir.empty())
    {
        return;
    }
    if (oldName.empty())
    {
        return;
    }
    fs::path dumpPath = dumpDir;
    const fs::path newFileName = dumpPath / (std::to_string(networkId) + "_networkgraph.dot");

    int iRet = rename(oldName.c_str(), newFileName.c_str());
    if (iRet != 0)
    {
        std::stringstream ss;
        ss << "rename of [" << oldName << "] to [" << newFileName << "] failed with errno " << std::to_string(errno)
           << " : " << std::strerror(errno);
        ALOGW(ss.str().c_str());
    }
}

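// Commits (flushes) every request memory pool so that any outputs written by the driver are
// made visible to the client; see the note below about the Android P/Q versus R API difference.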
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    if (memPools.empty())
    {
        return;
    }
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
        pool.flush();
#else
        pool.update();
#endif
    }
}
} // namespace armnn_driver