//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Note: the ArmnnBurstExecutorWithCache in this file is based on Android code
// under the Apache 2.0 license. See comment below for details.
//

#define LOG_TAG "ArmnnDriver"

#include "ArmnnPreparedModel_1_2.hpp"
#include "Utils.hpp"

#include <boost/format.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>

#include <cassert>
#include <cinttypes>

using namespace android;
using namespace android::hardware;

namespace {

static const Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
using TimePoint = std::chrono::steady_clock::time_point;

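// Helpers for measuring execution time when the client requests MeasureTiming::YES.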
TimePoint Now()
{
    return std::chrono::steady_clock::now();
}

unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint)
{
    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
                                      endPoint - startPoint).count());
}

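// Reports an execution result to a V1_0 client. V1_0 callbacks carry only the error
// status, so the output shapes and timing arguments are accepted but ignored.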
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
                            V1_0::ErrorStatus errorStatus,
                            std::vector<OutputShape>,
                            const Timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify(errorStatus);
    // This check is required; if the callback fails and isn't checked, it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

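// Reports an execution result to a V1_2 client, forwarding the dynamic output shapes
// and timing information via notify_1_2.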
void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
                            V1_0::ErrorStatus errorStatus,
                            std::vector<OutputShape> outputShapes,
                            const Timing timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify_1_2(errorStatus, outputShapes, timing);
    // This check is required; if the callback fails and isn't checked, it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

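// Checks that the rank and each dimension supplied in the request argument match the
// tensor info the network was compiled with. An empty dimensions vector means the
// argument is unspecified, in which case the check is skipped.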
bool ValidateRequestArgument(const RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
{
    if (requestArg.dimensions.size() != 0)
    {
        if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
        {
            ALOGE("Mismatched dimensions (request argument: %zu, expected: %u)",
                  requestArg.dimensions.size(), tensorInfo.GetNumDimensions());
            return false;
        }

        for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
        {
            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
            {
                ALOGE("Mismatched size for dimension %u (request argument: %u, expected %u)",
                      d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
                return false;
            }
        }
    }

    return true;
}

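// Builds an armnn::Tensor for a request argument, resolving its data pointer from the
// request's memory pools. Returns an empty tensor (null memory area) if validation fails.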
armnn::Tensor GetTensorForRequestArgument(const RequestArgument& requestArg,
                                          const armnn::TensorInfo& tensorInfo,
                                          const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
{
    if (!ValidateRequestArgument(requestArg, tensorInfo))
    {
        return armnn::Tensor();
    }

    return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
}

inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
{
    return tensorNamePrefix + std::to_string(index);
}

} // anonymous namespace

using namespace android::hardware;

namespace armnn_driver
{

template<typename HalVersion>
RequestThread<ArmnnPreparedModel_1_2, HalVersion, CallbackContext_1_2>
    ArmnnPreparedModel_1_2<HalVersion>::m_RequestThread;

template<typename HalVersion>
template<typename TensorBindingCollection>
void ArmnnPreparedModel_1_2<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
                                                               const TensorBindingCollection& tensorBindings)
{
    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        const std::string requestName = boost::str(boost::format("%1%_%2%.dump") % m_NetworkId % m_RequestCount);
        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
        {
            DumpTensor(m_RequestInputsAndOutputsDumpDir,
                       requestName,
                       BuildTensorName(tensorNamePrefix, i),
                       tensorBindings[i].second);
        }
    }
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const V1_2::Model& model,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::~ArmnnPreparedModel_1_2()
{
    // Get a hold of the profiler used by this model.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);

    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);

    // Dump the profiling info to a file if required.
    DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId, profiler.get());
}

template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(
    const V1_0::Request& request,
    const ::android::sp<V1_0::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                         std::vector<OutputShape> outputShapes,
                         const Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, MeasureTiming::NO, cb);
}

template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(
    const V1_0::Request& request,
    MeasureTiming measureTiming,
    const sp<V1_2::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                         std::vector<OutputShape> outputShapes,
                         const Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, measureTiming, cb);
}

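// Converts an armnn::TensorInfo into the NNAPI OutputShape representation used to
// report output dimensions back to the client.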
OutputShape ComputeShape(const armnn::TensorInfo& info)
{
    OutputShape shape;

    hidl_vec<uint32_t> dimensions;

    armnn::TensorShape tensorShape = info.GetShape();
    const unsigned int numDims = tensorShape.GetNumDimensions();
    dimensions.resize(numDims);

    for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
    {
        dimensions[outputIdx] = tensorShape[outputIdx];
    }

    shape.dimensions = dimensions;
    shape.isSufficient = true;

    return shape;
}

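// Binds each request input to an armnn input tensor backed by the request's memory pools.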
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForInputs(
    armnn::InputTensors& inputs,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    inputs.reserve(request.inputs.size());
    for (unsigned int i = 0; i < request.inputs.size(); i++)
    {
        const auto& inputArg = request.inputs[i];

        const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);

        if (inputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        inputs.emplace_back(i, inputTensor);
    }

    return V1_0::ErrorStatus::NONE;
}

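// Binds each request output to an armnn output tensor and records its shape. Also checks
// that each output buffer is large enough for the tensor that will be written into it.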
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOutputs(
    armnn::OutputTensors& outputs,
    std::vector<OutputShape>& outputShapes,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    outputs.reserve(request.outputs.size());
    for (unsigned int i = 0; i < request.outputs.size(); i++)
    {
        const auto& outputArg = request.outputs[i];

        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
        if (outputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        const size_t outputSize = outputTensorInfo.GetNumBytes();
        const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
        if (bufferSize < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed: output %u buffer (%zu bytes) is too small "
                  "for the output tensor (%zu bytes)", i, bufferSize, outputSize);
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }

        outputs.emplace_back(i, outputTensor);
        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    return V1_0::ErrorStatus::NONE;
}

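// Maps the request's memory pools and prepares all input and output tensors. On any
// failure the callback is invoked with the error before the status is returned, so
// callers only need to check the returned status.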
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForIO(
    armnn::InputTensors& inputs,
    armnn::OutputTensors& outputs,
    std::vector<android::nn::RunTimePoolInfo>& memPools,
    const V1_0::Request& request,
    CallbackAsync_1_2 callback)
{
    if (!setRunTimePoolInfosFromHidlMemories(&memPools, request.pools))
    {
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

    // add the inputs and outputs with their data
    try
    {
        if (PrepareMemoryForInputs(inputs, request, memPools) != V1_0::ErrorStatus::NONE)
        {
            callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        std::vector<OutputShape> outputShapes(request.outputs.size());

        auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
        if (errorStatus != V1_0::ErrorStatus::NONE)
        {
            callback(errorStatus,
                     outputShapes,
                     g_NoTiming,
                     "ArmnnPreparedModel_1_2::Execute");
            return errorStatus;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

    return V1_0::ErrorStatus::NONE;
}

void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    if (memPools.empty())
    {
        return;
    }
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
        pool.flush();
#else
        pool.update();
#endif
    }
}

template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const V1_0::Request& request,
                                                                      MeasureTiming measureTiming,
                                                                      executeSynchronously_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (cb == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid callback passed");
        return Void();
    }

    TimePoint driverStart;

    if (measureTiming == MeasureTiming::YES)
    {
        driverStart = Now();
    }

    if (!android::nn::validateRequest(request, m_Model))
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid request model");
        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
        return Void();
    }

    auto cbWrapper = [cb](V1_0::ErrorStatus errorStatus,
                          std::vector<OutputShape> outputShapes,
                          const Timing& timing,
                          std::string)
    {
        cb(errorStatus, outputShapes, timing);
    };

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputs = std::make_shared<armnn::InputTensors>();
    auto outputs = std::make_shared<armnn::OutputTensors>();

    auto prepareStatus = PrepareMemoryForIO(*inputs, *outputs, *memPools, request, cbWrapper);
    if (prepareStatus != V1_0::ErrorStatus::NONE)
    {
        return Void();
    }

    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() before Execution");

    CallbackContext_1_2 cbCtx;
    cbCtx.callback = cbWrapper;
    cbCtx.ctx.measureTimings = measureTiming;
    cbCtx.ctx.driverStart = driverStart;
    ExecuteGraph(memPools, *inputs, *outputs, cbCtx);

    return Void();
}

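// Runs the loaded network: enqueues the workload on the armnn runtime, commits the output
// memory pools, and invokes the callback with the result (and timing, if requested).
// Returns false if execution failed.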
template<typename HalVersion>
template<typename CallbackContext>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    armnn::InputTensors& inputTensors,
    armnn::OutputTensors& outputTensors,
    CallbackContext cb)
{
    ALOGV("ArmnnPreparedModel_1_2::ExecuteGraph(...)");

    TimePoint driverEnd, deviceStart, deviceEnd;

    DumpTensorsIfRequired("Input", inputTensors);

    std::vector<OutputShape> outputShapes(outputTensors.size());
    for (unsigned int i = 0; i < outputTensors.size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    // run it
    try
    {
        if (cb.ctx.measureTimings == MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);

        if (cb.ctx.measureTimings == MeasureTiming::YES)
        {
            deviceEnd = Now();
        }
        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                        "ArmnnPreparedModel_1_2::ExecuteGraph");
            return false;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }

    CommitPools(*pMemPools);

    DumpTensorsIfRequired("Output", outputTensors);

    if (cb.ctx.measureTimings == MeasureTiming::YES)
    {
        driverEnd = Now();
        Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.ctx.driverStart);
        ALOGV("ArmnnPreparedModel_1_2::execute timing - Device = %" PRIu64 " Driver = %" PRIu64,
              timing.timeOnDevice, timing.timeInDriver);
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, timing, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }
    else
    {
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }

    return true;
}

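// Executes the network once with zero-filled dummy inputs and outputs. This exercises
// the network without a client request, for example to detect a network that cannot
// run at model-preparation time.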
template<typename HalVersion>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs()
{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < m_Model.inputIndexes.size(); i++)
    {
        const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < m_Model.outputIndexes.size(); i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    auto nullCallback = [](V1_0::ErrorStatus, std::vector<OutputShape>, const Timing&, std::string) {};
    CallbackContext_1_2 callbackContext;
    callbackContext.callback = nullCallback;
    callbackContext.ctx.measureTimings = MeasureTiming::NO;
    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
    return ExecuteGraph(memPools,
                        inputTensors,
                        outputTensors,
                        callbackContext);
}

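// Common asynchronous execution path shared by execute() and execute_1_2(): validates
// the request, prepares the I/O memory, and posts the work to the request thread. For
// OUTPUT_INSUFFICIENT_SIZE the error has already been delivered through the callback,
// so the launch itself is reported as NONE.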
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const V1_0::Request& request,
                                                                      MeasureTiming measureTiming,
                                                                      CallbackAsync_1_2 callback)
{
    ExecutionContext_1_2 ctx;
    if (measureTiming == MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Now();
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (!android::nn::validateRequest(request, m_Model))
    {
        callback(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(&callback));
    }

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto prepareStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request, callback);
    switch(prepareStatus)
    {
        case V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return V1_0::ErrorStatus::NONE;
        case V1_0::ErrorStatus::GENERAL_FAILURE:
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        default:
        {}
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");

    // post the request for asynchronous execution
    CallbackContext_1_2 cb;
    cb.callback = callback;
    cb.ctx = ctx;
    m_RequestThread.PostMsg(this, memPools, inputTensors, outputTensors, cb);
    ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");
    return V1_0::ErrorStatus::NONE;
}

/// This class is strongly inspired by the default implementation in Android named DefaultBurstExecutorWithCache.
/// The original code is licensed under Apache-2.0 and can be found at the following link:
/// https://android.googlesource.com/platform/frameworks/
/// ml/+/refs/tags/android-10.0.0_r20/nn/common/ExecutionBurstServer.cpp
class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
{
public:
    ArmnnBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel)
        : m_PreparedModel(preparedModel)
    {}

    bool isCacheEntryPresent(int32_t slot) const override
    {
        const auto it = m_MemoryCache.find(slot);
        return (it != m_MemoryCache.end()) && it->second.valid();
    }

    void addCacheEntry(const hidl_memory& memory, int32_t slot) override
    {
        m_MemoryCache[slot] = memory;
    }

    void removeCacheEntry(int32_t slot) override
    {
        m_MemoryCache.erase(slot);
    }

    std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
        const V1_0::Request& request, const std::vector<int32_t>& slots,
        MeasureTiming measure) override
    {
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute");
        // Resolve each slot to its cached hidl_memory and rebuild the request's memory pools.
        hidl_vec<hidl_memory> pools(slots.size());

        std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot)
        {
            return m_MemoryCache[slot];
        });

        V1_0::Request fullRequest = request;
        fullRequest.pools = std::move(pools);

        // Setup Callback
        V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
        hidl_vec<OutputShape> returnedOutputShapes;
        Timing returnedTiming;
        auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](V1_0::ErrorStatus status,
                                                                            const hidl_vec<OutputShape>& outputShapes,
                                                                            const Timing& timing)
        {
            returnedStatus = status;
            returnedOutputShapes = outputShapes;
            returnedTiming = timing;
        };

        // Execute
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing");
        const Return<void> ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb);

        if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE)
        {
            ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing");
        }
        return std::make_tuple(returnedStatus, std::move(returnedOutputShapes), returnedTiming);
    }

private:
    V1_2::IPreparedModel* const m_PreparedModel;
    std::map<int, hidl_memory> m_MemoryCache;
};

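// Creates an execution burst server for this prepared model, wiring the FMQ
// request/result channels to the cache-aware executor above.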
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::configureExecutionBurst(
    const sp<V1_2::IBurstCallback>& callback,
    const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
    const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
    V1_2::IPreparedModel::configureExecutionBurst_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst");
    const std::shared_ptr<ArmnnBurstExecutorWithCache> executorWithCache =
        std::make_shared<ArmnnBurstExecutorWithCache>(this);
    const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(callback,
                                                                       requestChannel,
                                                                       resultChannel,
                                                                       executorWithCache);

    if (burst == nullptr)
    {
        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
    }
    else
    {
        cb(V1_0::ErrorStatus::NONE, burst);
    }
    return Void();
}

#ifdef ARMNN_ANDROID_NN_V1_2
template class ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>;
template bool ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>::ExecuteGraph<CallbackContext_1_2>(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    armnn::InputTensors& pInputTensors,
    armnn::OutputTensors& pOutputTensors,
    CallbackContext_1_2 cb);
#endif

} // namespace armnn_driver