blob: 1747f61f8c504eb7b01ce413ed70849efd41cbf0 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
arovir01b0717b52018-09-05 17:03:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010013#include <armnn/utility/Assert.hpp>
Jan Eilers0b7a4192020-03-09 18:20:42 +000014#include <armnn/utility/IgnoreUnused.hpp>
Matthew Sloyan9b088d92020-09-14 15:12:55 +010015#include <armnn/utility/NumericCast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000017#include <armnnUtils/DataLayoutIndexed.hpp>
Mike Kelly4a956582020-02-28 10:32:09 +000018#include <armnnUtils/Transpose.hpp>
arovir01b0717b52018-09-05 17:03:25 +010019
Mike Kelly46272802019-08-14 17:00:48 +010020#include "1.0/FullyConnected.hpp"
21
arovir01b0717b52018-09-05 17:03:25 +010022#include <ActivationFunctor.h>
23#include <CpuExecutor.h>
24#include <OperationsUtils.h>
25
James Ward4e22f602020-10-20 15:50:33 +010026#include <armnnUtils/FloatingPointComparison.hpp>
arovir01b0717b52018-09-05 17:03:25 +010027
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
Kevin DuBoisa2cb5482020-08-26 13:41:12 -070031#ifdef __clang__
32#pragma clang diagnostic push
33#pragma clang diagnostic ignored "-Wunneeded-internal-declaration"
34#pragma clang diagnostic ignored "-Wunused-function"
35#pragma clang diagnostic ignored "-Wunused-variable"
36#endif
arovir01b0717b52018-09-05 17:03:25 +010037namespace armnn_driver
38{
39
40///
41/// Helper classes
42///
43
Kevin Mayec1e5b82020-02-26 17:00:39 +000044#ifdef ARMNN_ANDROID_R
Kevin DuBoisc0945c72020-11-20 16:57:09 -080045using OperandType = android::nn::OperandType;
Kevin Mayec1e5b82020-02-26 17:00:39 +000046#endif
47
// Per-model state shared between the individual operation converters while an
// AndroidNN model is being translated into an armnn::INetwork.
struct ConversionData
{
    // @param backends Ordered list of backend ids that layer support is checked against.
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)          // no network yet; populated by the caller
    , m_DynamicInputsEncountered(false)
    {}

    // Backends to query (in order) when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The network being built up during conversion.
    armnn::INetworkPtr m_Network;
    // Maps an operand index to the output slot that produces it (indexed by operand id).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operands; must outlive the conversion.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
    // Set to true when at least one input with a dynamic shape has been seen.
    bool m_DynamicInputsEncountered;
};
62
// Lightweight handle pairing an armnn output slot with its tensor info, used to
// connect (or disconnect) a producing layer to a consuming layer's input slot.
class LayerInputHandle
{
public:
    // Constructs an invalid handle (IsValid() == false).
    LayerInputHandle();
    // @param valid      Whether the handle refers to a usable output slot.
    // @param outputSlot The producing slot; not owned. May be null for invalid handles.
    // @param tensorInfo Tensor info describing the data carried by the slot.
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Disconnects the wrapped output slot from the given input slot.
    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;  // non-owning
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
82
// Holds an armnn::ConstTensor for a constant model operand, optionally owning a
// swizzled copy of the data when a permutation was required. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data; identity means the pool memory is referenced
    //                   directly, otherwise a swizzled copy is stored in m_SwizzledTensorData.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
117
118} // namespace armnn_driver
119
120///
121/// Utility functions
122///
123
124namespace
125{
126
127using namespace armnn_driver;
128using namespace android::nn;
129
// Convenience function to log the reason for failing to convert a model.
// Forwards the printf-style format string and arguments straight to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
138
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates over `backends` in order and sets `supported` to the first backend's
// answer that is true; logs (via ALOGD) the reason string for each backend that
// rejects the layer, and logs unregistered backends. If no backend supports the
// layer, `supported` is left false. An InvalidArgumentException thrown by the
// support check is re-thrown with the current source location attached.
// NOTE: comments cannot be placed inside the macro body - line splicing happens
// before comment removal, so a // comment would swallow the continuation backslash.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100182
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000183template<typename HalOperand>
184armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100185{
186 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
187}
188
Matthew Bentham912b3622019-05-03 15:49:14 +0100189inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100190{
Matthew Bentham912b3622019-05-03 15:49:14 +0100191 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
192 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
193 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100194}
195
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Support within the 1.2 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_BOOL8:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
213
#ifdef ARMNN_ANDROID_NN_V1_3

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
{
    switch (type)
    {
        case V1_3::OperandType::BOOL:
        case V1_3::OperandType::TENSOR_BOOL8:
        case V1_3::OperandType::TENSOR_FLOAT16:
        case V1_3::OperandType::TENSOR_FLOAT32:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
        case V1_3::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
232
// V1_0 has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// A V1_0 operand can never be a 1.2-or-later operand.
inline bool Is12OrLaterOperand(V1_0::Operand)
{
    return false;
}
242
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Returns true if the V1_2 operand carries a scalar BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a 1_2 (or later) Operand
inline bool Is12OrLaterOperand(V1_2::Operand)
{
    return true;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3

// Returns true if the V1_3 operand carries a scalar BOOL.
inline bool IsBool(V1_3::Operand operand)
{
    return operand.type == V1_3::OperandType::BOOL;
}

/// Checks if an operand is a 1_2 (or later) Operand
inline bool Is12OrLaterOperand(V1_3::Operand)
{
    return true;
}

#endif
272
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000274armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
275 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100276 armnn::TensorInfo reshapeInfo)
277{
278 armnn::ReshapeDescriptor reshapeDescriptor;
279 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
280
281 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100282 ARMNN_ASSERT(reshapeLayer != nullptr);
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100283
284 // Attach the input layer to the reshape layer
285 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
286 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
287
288 return *reshapeLayer;
289}
290
// Connects input0 and input1 to startLayer's two input slots, inserting a reshape
// (prepending size-1 dimensions) in front of the lower-rank input so both inputs
// have the same number of dimensions, as required for broadcasting.
// @return false if the required reshape is not supported by any backend.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    ARMNN_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                           armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Build the broadcast shape: leading dimensions are 1, trailing dimensions copy the small shape.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    ARMNN_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
383
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000384void CalcPadding(uint32_t input,
385 uint32_t kernel,
386 uint32_t stride,
387 uint32_t& outPadHead,
388 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100389 android::nn::PaddingScheme scheme)
390{
391 int32_t padHead;
392 int32_t padTail;
393 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
Matthew Sloyan9b088d92020-09-14 15:12:55 +0100394 outPadHead = armnn::numeric_cast<uint32_t>(padHead);
395 outPadTail = armnn::numeric_cast<uint32_t>(padTail);
arovir01b0717b52018-09-05 17:03:25 +0100396}
397
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

// Dilation-aware overload: computes explicit head/tail padding for one spatial
// dimension, taking the kernel dilation into account (1.2+ convolutions).
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    // numeric_cast guards against a negative padding ever coming back.
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

// Transpose-convolution variant: padding is derived from the *output* size and
// may legitimately be negative, so the results stay signed.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
417
Matthew Bentham912b3622019-05-03 15:49:14 +0100418Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100419{
420 Shape shape;
Kevin DuBois30c34ae2020-08-26 13:53:41 -0700421 shape.type = android::nn::OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100422 shape.dimensions = operand.dimensions;
423 shape.scale = operand.scale;
424 shape.offset = operand.zeroPoint;
425 return shape;
426}
427
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)

/// Converts a V1_2 operand's metadata into an android::nn::Shape
/// (type, dimensions and quantization parameters).
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape result;
    result.type       = android::nn::OperandType(operand.type);
    result.dimensions = operand.dimensions;
    result.scale      = operand.scale;
    result.offset     = operand.zeroPoint;
    return result;
}

#endif
441
#ifdef ARMNN_ANDROID_NN_V1_3

// Converts a V1_3 operand's metadata into an android::nn::Shape
// (type, dimensions and quantization parameters).
Shape GetOperandShape(const V1_3::Operand& operand)
{
    Shape shape;
    // OperandType here resolves to the alias declared at the top of this file
    // (android::nn::OperandType under ARMNN_ANDROID_R).
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
455
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // The bias is quantized along the same axis as the weights.
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only correct the scale when it is within 1% of the expected value;
            // a larger mismatch is left untouched (and will be rejected downstream).
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
494
// 4D tensor permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// 3D identity (used when a 3-D concat needs a 3-element permutation pair)
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// Swaps dimensions 1 and 2 of a 4-D tensor (used for concat along dimension 2)
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D permutation vectors: rotate dimensions left/right (inverses of each other)
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
arovir01b0717b52018-09-05 17:03:25 +0100503
504template<typename OSlot>
Mike Kelly4a956582020-02-28 10:32:09 +0000505armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
506 const armnn::PermutationVector& mappings)
arovir01b0717b52018-09-05 17:03:25 +0100507{
508 // Add swizzle layer
Mike Kelly4a956582020-02-28 10:32:09 +0000509 armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
arovir01b0717b52018-09-05 17:03:25 +0100510
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100511 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100512
513 // Connect input to swizzle layer
514 input.Connect(layer->GetInputSlot(0));
515
516 // Setup swizzled output
Mike Kelly4a956582020-02-28 10:32:09 +0000517 const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
arovir01b0717b52018-09-05 17:03:25 +0100518 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
519
520 return *layer;
521}
522
arovir01b0717b52018-09-05 17:03:25 +0100523bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
524 const armnn::TensorShape & outputShape,
525 uint32_t concatDim)
526{
527 // Validate the output shape is correct given the input shapes (which have just been validated)
528 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
529 if (outputShape.GetNumDimensions() != numDimensions)
530 {
531 return Fail("%s: Output shape has wrong number of dimensions", __func__);
532 }
533
534 unsigned int outputSizeAlongConcatenatedDimension = 0;
535 for (unsigned int i = 0; i < inputShapes.size(); i++)
536 {
537 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
538 }
539
540 for (unsigned int i = 0; i < numDimensions; ++i)
541 {
542 if (i == concatDim)
543 {
544 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
545 {
546 return Fail(
547 "%s: Invalid output shape for dimension %d (%d != %d)",
548 __func__,
549 i,
550 outputShape[i],
551 outputSizeAlongConcatenatedDimension);
552 }
553 }
554 else
555 {
556 if (outputShape[i] != inputShapes[0][i])
557 {
558 return Fail("%s: Invalid output shape", __func__);
559 }
560 }
561 }
562
563 return true;
564}
565
// Concat handling reshapes any input of rank < 3 up to 3-D before permuting.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
570
// Inserts a transpose layer in front of each input handle (unless `mapping` is
// the 4-D identity) and replaces the handles and shapes in-place with the
// swizzled versions.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
591
// Checks backend support for transposing every input with `mapping`, then
// applies the transposes via SwizzleInputs. Identity mappings (3-D or 4-D) are
// a no-op.
// @return false if any backend rejects the transpose for any input.
bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // check permute layer
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            if (!isSupported)
            {
                return false;
            }

        }
        // All transposes are supported: insert them and update the handles/shapes.
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
    }
    return true;
}
627
628
// Decides whether a concatenation needs its inputs permuted so that ArmNN's
// subtensor-based concat can be used, and if so which (forward, inverse)
// permutation pair to apply. May rewrite `concatDimension` to the post-permute
// axis.
// @return true if the inputs must be permuted before (and un-permuted after).
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    ARMNN_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
659
660} // anonymous namespace
661
662namespace armnn_driver
663{
664
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @param tensorInfo Tensor info set on the output of the activation layer.
/// @param activation AndroidNN fused activation function to realise.
/// @param prevLayer  Layer whose output feeds the activation.
/// @param data       Conversion state (backends, network under construction).
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
675
676} // namespace armnn_driver
677
678///
679/// Utility templates
680///
681
682namespace armnn_driver
683{
684
685using namespace android::nn;
686
/// Looks up the operand feeding input slot `inputIndex` of `operation`.
/// @param operation              Operation whose input list is consulted.
/// @param inputIndex             Index into operation.inputs.
/// @param model                  Model owning the operands.
/// @param failOnIndexOutOfBounds When true, logs a failure message for an
///                               out-of-range index (the return is nullptr either way).
/// @return Pointer to the operand, or nullptr if inputIndex is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the argument types (uint32_t / size_t); the previous
            // "%i" was undefined behaviour for the size_t argument on LP64.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
709
/// Looks up the operand produced at output slot `outputIndex` of `operation`.
/// @param operation   Operation whose output list is consulted.
/// @param outputIndex Index into operation.outputs.
/// @param model       Model owning the operands.
/// @return Pointer to the operand, or nullptr (with a log) if outputIndex is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the argument types (uint32_t / size_t); the previous
        // "%i" was undefined behaviour for the size_t argument on LP64.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}
729
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100730template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +0100731 typename HalOperand = typename HalPolicy::Operand,
732 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100733const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100734 const HalModel& model,
735 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000736 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100737{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100738 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100739
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100740 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100741 switch (operand.lifetime)
742 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100743 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100744 {
745 // Constant found in model.operandValues
746 valueStart = &model.operandValues[operand.location.offset];
747 break;
748 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100749 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100750 {
751 // Constant specified via a Memory object
752 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
753 break;
754 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100755 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000756 {
757 // An optional input tensor with no values is not an error so should not register as a fail
758 if (optional)
759 {
760 valueStart = nullptr;
761 break;
762 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100763 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000764 }
arovir01b0717b52018-09-05 17:03:25 +0100765 default:
766 {
767 // Unsupported/invalid (e.g. can't get value of an input to the model)
768 Fail("%s: unsupported/invalid operand lifetime: %s",
769 __func__, toString(operand.lifetime).c_str());
770 valueStart = nullptr;
771 }
772 }
773
774 return valueStart;
775}
776
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100777template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100778 typename HalOperation = typename HalPolicy::Operation,
779 typename HalModel = typename HalPolicy::Model,
780 typename HalOperandType = typename HalPolicy::OperandType>
781bool GetOperandType(const HalOperation& operation,
782 uint32_t inputIndex,
783 const HalModel& model,
784 HalOperandType& type)
785{
786 using HalOperand = typename HalPolicy::Operand;
787
788 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
789 if (!operand)
790 {
791 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
792 }
793
794 type = operand->type;
795 return true;
796}
797
/// Returns true when the operand's value is fixed at model-build time:
/// an inline constant, a referenced constant, or a deliberately absent (NO_VALUE) operand.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        case HalOperandLifeTime::NO_VALUE:
            return true;
        default:
            return false;
    }
}
810
811template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100812 typename HalOperand = typename HalPolicy::Operand,
813 typename HalModel = typename HalPolicy::Model>
814ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
815 const HalModel& model,
816 const ConversionData& data,
817 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
818 const armnn::TensorShape* overrideTensorShape = nullptr,
819 bool optional = false)
820{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100821 if (!IsOperandTypeSupportedForTensors(operand.type))
822 {
823 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
824 return ConstTensorPin();
825 }
826
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +0000827 if (!optional && !IsOperandConstant<HalPolicy>(operand))
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100828 {
829 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
830 return ConstTensorPin();
831 }
832
833 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
834 if (!valueStart)
835 {
836 if (optional)
837 {
838 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
839 return ConstTensorPin(true);
840 }
841 // mandatory tensor with no values
842 Fail("%s: failed to get operand address", __func__);
843 return ConstTensorPin();
844 }
845
846 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
Teresa Charlin02dce092019-11-11 17:06:23 +0000847 // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
848 if (tensorInfo.HasPerAxisQuantization())
849 {
850 tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
851 }
852
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853 if (overrideTensorShape != nullptr)
854 {
855 tensorInfo.SetShape(*overrideTensorShape);
856 }
857 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
858}
859
860template<typename HalPolicy,
861 typename HalOperation = typename HalPolicy::Operation,
862 typename HalModel = typename HalPolicy::Model>
863ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
864 uint32_t inputIndex,
865 const HalModel& model,
866 const ConversionData& data,
867 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
868 const armnn::TensorShape* overrideTensorShape = nullptr,
869 bool optional = false)
870{
871 using HalOperand = typename HalPolicy::Operand;
872
873 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
874 if (!operand)
875 {
876 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
877 return ConstTensorPin();
878 }
879 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
880 model,
881 data,
882 dimensionMappings,
883 overrideTensorShape,
884 optional);
885}
886
887template<typename HalPolicy,
888 typename OutputType,
889 typename HalOperandType = typename HalPolicy::OperandType,
890 typename HalOperation = typename HalPolicy::Operation,
891 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100892bool GetInputScalar(const HalOperation& operation,
893 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100894 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100895 OutputType& outValue,
896 const HalModel& model,
Sadik Armagan813f2302020-05-19 14:10:30 +0100897 const ConversionData& data,
898 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100899{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100900 using HalOperand = typename HalPolicy::Operand;
901
902 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Sadik Armagan813f2302020-05-19 14:10:30 +0100903 if (!optional && !operand)
arovir01b0717b52018-09-05 17:03:25 +0100904 {
905 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
906 }
907
Sadik Armagan813f2302020-05-19 14:10:30 +0100908 if (!optional && operand->type != type)
arovir01b0717b52018-09-05 17:03:25 +0100909 {
910 return Fail("%s: unexpected operand type: %s (should be %s)",
911 __func__, toString(operand->type).c_str(), toString(type).c_str());
912 }
913
Sadik Armagan813f2302020-05-19 14:10:30 +0100914 if (!optional && operand->location.length != sizeof(OutputType))
arovir01b0717b52018-09-05 17:03:25 +0100915 {
916 return Fail("%s: incorrect operand location length: %i (should be %i)",
917 __func__, operand->location.length, sizeof(OutputType));
918 }
919
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100920 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Sadik Armagan813f2302020-05-19 14:10:30 +0100921 if (!optional && !valueAddress)
arovir01b0717b52018-09-05 17:03:25 +0100922 {
923 return Fail("%s: failed to get address for operand", __func__);
924 }
925
Sadik Armagan813f2302020-05-19 14:10:30 +0100926 if(!optional)
927 {
928 outValue = *(static_cast<const OutputType*>(valueAddress));
929 }
930
arovir01b0717b52018-09-05 17:03:25 +0100931 return true;
932}
933
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100934template<typename HalPolicy,
935 typename HalOperation = typename HalPolicy::Operation,
936 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100937bool GetInputInt32(const HalOperation& operation,
938 uint32_t inputIndex,
939 int32_t& outValue,
940 const HalModel& model,
941 const ConversionData& data)
942{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100943 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100944}
945
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100946template<typename HalPolicy,
947 typename HalOperation = typename HalPolicy::Operation,
948 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100949bool GetInputFloat32(const HalOperation& operation,
950 uint32_t inputIndex,
951 float& outValue,
952 const HalModel& model,
953 const ConversionData& data)
954{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100955 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100956}
957
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958template<typename HalPolicy,
959 typename HalOperation = typename HalPolicy::Operation,
960 typename HalOperandType = typename HalPolicy::OperandType,
961 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100962bool GetInputActivationFunctionImpl(const HalOperation& operation,
963 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100964 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100965 ActivationFn& outActivationFunction,
966 const HalModel& model,
967 const ConversionData& data)
968{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100969 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100970 {
971 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
972 __func__,
973 toString(type).c_str(),
Kevin DuBois30c34ae2020-08-26 13:53:41 -0700974 toString(HalOperandType::INT32).c_str(),
975 toString(HalOperandType::TENSOR_INT32).c_str());
arovir01b0717b52018-09-05 17:03:25 +0100976 }
977
978 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100979 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100980 {
981 return Fail("%s: failed to get activation input value", __func__);
982 }
983 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
984 return true;
985}
986
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987template<typename HalPolicy,
988 typename HalOperation = typename HalPolicy::Operation,
989 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100990bool GetInputActivationFunction(const HalOperation& operation,
991 uint32_t inputIndex,
992 ActivationFn& outActivationFunction,
993 const HalModel& model,
994 const ConversionData& data)
995{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100996 return GetInputActivationFunctionImpl<HalPolicy>(operation,
997 inputIndex,
998 HalPolicy::OperandType::INT32,
999 outActivationFunction,
1000 model,
1001 data);
arovir01b0717b52018-09-05 17:03:25 +01001002}
1003
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001004template<typename HalPolicy,
1005 typename HalOperation = typename HalPolicy::Operation,
1006 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001007bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
1008 uint32_t inputIndex,
1009 ActivationFn& outActivationFunction,
1010 const HalModel& model,
1011 const ConversionData& data)
1012{
1013 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001014 return GetInputActivationFunctionImpl<HalPolicy>(operation,
1015 inputIndex,
1016 HalPolicy::OperandType::INT32,
1017 outActivationFunction,
1018 model,
1019 data);
arovir01b0717b52018-09-05 17:03:25 +01001020}
1021
1022
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001023template<typename HalPolicy,
1024 typename HalOperation = typename HalPolicy::Operation,
1025 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001026bool GetOptionalInputActivation(const HalOperation& operation,
1027 uint32_t inputIndex,
1028 ActivationFn& activationFunction,
1029 const HalModel& model,
1030 const ConversionData& data)
1031{
1032 if (operation.inputs.size() <= inputIndex)
1033 {
1034 activationFunction = ActivationFn::kActivationNone;
1035 }
1036 else
1037 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001038 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001039 {
1040 return Fail("%s: Operation has invalid inputs", __func__);
1041 }
1042 }
1043 return true;
1044}
1045
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001046template<typename HalPolicy,
1047 typename ConvolutionDescriptor,
1048 typename HalOperation = typename HalPolicy::Operation,
1049 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001050bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
1051 uint32_t dilationXIndex,
1052 ConvolutionDescriptor& descriptor,
1053 const HalModel& model,
1054 const ConversionData& data)
1055{
1056 bool success = true;
1057 if (operation.inputs.size() >= dilationXIndex + 2)
1058 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059 success &= GetInputScalar<HalPolicy>(operation,
1060 dilationXIndex,
1061 HalPolicy::OperandType::INT32,
1062 descriptor.m_DilationX,
1063 model,
1064 data);
1065 success &= GetInputScalar<HalPolicy>(operation,
1066 dilationXIndex + 1,
1067 HalPolicy::OperandType::INT32,
1068 descriptor.m_DilationY,
1069 model,
1070 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +01001071 }
1072
1073 return success;
1074}
1075
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001076template<typename HalPolicy,
David Monahan51e0b132020-04-20 16:12:06 +01001077 typename HalOperation = typename HalPolicy::Operation,
1078 typename HalModel = typename HalPolicy::Model>
1079bool GetOptionalBool(const HalOperation& operation,
1080 uint32_t inputIndex,
1081 const HalModel& model,
1082 const ConversionData& data)
1083{
1084 using HalOperand = typename HalPolicy::Operand;
1085
1086 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1087 if (!operand)
1088 {
1089 return false;
1090 }
1091
1092 if (!IsBool(*operand))
1093 {
1094 return false;
1095 }
1096
1097 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
1098 if (!valueAddress)
1099 {
1100 return false;
1101 }
1102
1103 if (*(static_cast<const bool*>(valueAddress)))
1104 {
1105 return true;
1106 }
1107 else
1108 {
1109 return false;
1110 }
1111}
1112
1113template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001114 typename HalOperand = typename HalPolicy::Operand,
1115 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001116bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001117 std::vector<int32_t>& outValues,
1118 const HalModel& model,
1119 const ConversionData& data)
1120{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001121 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001122 {
1123 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1124 }
1125
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001126 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001127 if (!startAddress)
1128 {
1129 return Fail("%s: failed to get operand address", __func__, operand.type);
1130 }
1131
1132 // Check number of bytes is sensible
1133 const uint32_t numBytes = operand.location.length;
1134 if (numBytes % sizeof(int32_t) != 0)
1135 {
1136 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1137 __func__, numBytes, sizeof(int32_t));
1138 }
1139
1140 outValues.resize(numBytes / sizeof(int32_t));
1141 memcpy(outValues.data(), startAddress, numBytes);
1142 return true;
1143}
1144
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001145template<typename HalPolicy,
1146 typename HalOperation = typename HalPolicy::Operation,
1147 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001148bool GetInputPaddingScheme(const HalOperation& operation,
1149 uint32_t inputIndex,
1150 PaddingScheme& outPaddingScheme,
1151 const HalModel& model,
1152 const ConversionData& data)
1153{
1154 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001155 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001156 {
1157 return Fail("%s: failed to get padding scheme input value", __func__);
1158 }
1159
1160 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1161 return true;
1162}
1163
/// Resolves input @p inputIndex of @p operation to a LayerInputHandle that can be
/// connected into the ArmNN network being built in @p data.
///
/// - MODEL_INPUT / TEMPORARY_VARIABLE / MODEL_OUTPUT operands map onto the output slot
///   previously recorded in data.m_OutputSlotForOperand.
/// - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialised as an ArmNN Constant
///   layer (subject to backend support).
/// Returns a default-constructed (invalid) LayerInputHandle on any failure; dynamic
/// input tensors are rejected outright in this (pre-V1_3) overload.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled by the catch below
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                // On success, fall through to the shared slot-lookup handling below
                [[clang::fallthrough]]; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the constant layer if at least one backend accepts it
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1276
Kevin May42477c12020-03-26 13:34:14 +00001277
1278#ifdef ARMNN_ANDROID_NN_V1_3
1279template<typename HalPolicy>
1280LayerInputHandle ConvertToLayerInputHandle(const ::android::hardware::neuralnetworks::V1_3::Operation& operation,
1281 uint32_t inputIndex,
1282 const::android::hardware::neuralnetworks::V1_3::Model& model,
1283 ConversionData& data)
1284{
1285 using HalOperand = typename HalPolicy::Operand;
1286 using HalOperandType = typename HalPolicy::OperandType;
1287 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
1288
1289 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
1290 if (!operand)
1291 {
1292 Fail("%s: failed to get input operand %i", __func__, inputIndex);
1293 return LayerInputHandle();
1294 }
1295
1296 if (!IsOperandTypeSupportedForTensors(operand->type))
1297 {
1298 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
1299 return LayerInputHandle();
1300 }
1301
1302 try
1303 {
1304 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Finn Williams9a044412020-08-17 19:08:35 +01001305
Kevin May42477c12020-03-26 13:34:14 +00001306 if (IsDynamicTensor(operandTensorInfo))
1307 {
Finn Williams291a16b2020-08-19 22:54:00 +01001308 data.m_DynamicInputsEncountered = true;
1309
Finn Williams9a044412020-08-17 19:08:35 +01001310 const uint32_t operandIndex = operation.inputs[inputIndex];
1311
1312 // Check if the dynamic input tensors have been inferred by one of the previous layers
1313 // If not we can't support them
Finn Williams291a16b2020-08-19 22:54:00 +01001314 if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
Finn Williams9a044412020-08-17 19:08:35 +01001315 {
1316 operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
1317 }
1318 else
1319 {
1320 Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
1321 return LayerInputHandle();
1322 }
Kevin May42477c12020-03-26 13:34:14 +00001323 }
1324
1325 switch (operand->lifetime)
1326 {
1327 case HalOperandLifeTime::SUBGRAPH_INPUT:
1328 {
1329 // NOTE: We must check whether we can support the input tensor on at least one
1330 // of the provided backends; otherwise we cannot convert the operation
1331 bool isInputSupported = false;
1332 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1333 IsInputSupported,
1334 data.m_Backends,
1335 isInputSupported,
1336 operandTensorInfo);
1337
1338 if (!isInputSupported)
1339 {
1340 Fail("%s: unsupported input tensor", __func__);
1341 return LayerInputHandle();
1342 }
1343
James Ward4e22f602020-10-20 15:50:33 +01001344 [[clang::fallthrough]]; // intentional fallthrough
Kevin May42477c12020-03-26 13:34:14 +00001345 }
1346 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
1347 case HalOperandLifeTime::SUBGRAPH_OUTPUT:
1348 {
1349 // The tensor is either an operand internal to the model, or a model input.
1350 // It can be associated with an ArmNN output slot for an existing layer.
1351
1352 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1353 const uint32_t operandIndex = operation.inputs[inputIndex];
1354 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
1355 }
1356 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
1357 case HalOperandLifeTime::CONSTANT_REFERENCE:
1358 {
1359 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1360 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1361 if (tensorPin.IsValid())
1362 {
1363 bool isSupported = false;
1364 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1365 IsConstantSupported,
1366 data.m_Backends,
1367 isSupported,
1368 tensorPin.GetConstTensor().GetInfo());
1369 if (!isSupported)
1370 {
1371 return LayerInputHandle();
1372 }
1373
1374 armnn::IConnectableLayer* constantLayer =
1375 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1376 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1377 outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1378
1379 return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1380 }
1381 else
1382 {
1383 Fail("%s: invalid operand tensor", __func__);
1384 return LayerInputHandle();
1385 }
1386 break;
1387 }
1388 default:
1389 {
1390 // Unsupported lifetime for an input tensor
1391 Fail("%s: unsupported lifetime for input tensor: %s",
1392 __func__, toString(operand->lifetime).c_str());
1393 return LayerInputHandle();
1394 }
1395 }
1396 }
1397 catch (UnsupportedOperand<HalOperandType>& e)
1398 {
1399 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1400 return LayerInputHandle();
1401 }
1402}
1403#endif
1404
/// Binds output @p operationOutputIndex of @p operation to output slot @p layerOutputIndex
/// of the freshly created ArmNN @p layer, records the slot in data.m_OutputSlotForOperand,
/// and optionally appends an activation layer.
///
/// @param overrideOutputInfo  When non-null, used instead of the operand's own TensorInfo.
/// @param validateFunc        When provided, is invoked to (re-)validate the output info;
///                            required for dynamic output tensors, whose shape is inferred
///                            from the connected inputs before validation.
/// @param activationFunction  If not kActivationNone, a fused activation is appended via
///                            ProcessActivation and its output slot is recorded instead.
/// @param inferOutputShapes   Forces the shape-inference/validation path even for
///                            non-dynamic outputs.
/// @return false when the operand cannot be resolved, a required input connection is
///         missing, or validation rejects the (inferred) output info.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false)
{
    using HalOperand = typename HalPolicy::Operand;

    // NOTE(review): the bounds check below uses operationOutputIndex, while the slot
    // actually accessed is layerOutputIndex — looks inconsistent; confirm callers always
    // pass matching indices.
    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if(!isSupported)
        {
            // Validation failed: detach the layer from the graph so it is effectively discarded
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        // Append the fused activation and track ITS output slot for downstream consumers
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
1486
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001487template<typename HalPolicy,
1488 typename HalOperation = typename HalPolicy::Operation,
1489 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001490armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1491 uint32_t inputIndex,
1492 const HalModel& model,
1493 ConversionData& data)
1494{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001495 using HalOperand = typename HalPolicy::Operand;
1496
1497 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001498 if (!operand)
1499 {
1500 return armnn::DataLayout::NHWC;
1501 }
1502
1503 if (!IsBool(*operand))
1504 {
1505 return armnn::DataLayout::NHWC;
1506 }
1507
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001508 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001509 if (!valueAddress)
1510 {
1511 return armnn::DataLayout::NHWC;
1512 }
1513
1514 if (*(static_cast<const bool*>(valueAddress)))
1515 {
1516 return armnn::DataLayout::NCHW;
1517 }
1518 else
1519 {
1520 return armnn::DataLayout::NHWC;
1521 }
1522}
1523
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001524template<typename HalPolicy,
1525 typename HalOperation = typename HalPolicy::Operation,
1526 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001527bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1528 uint32_t outputIndex,
1529 armnn::IConnectableLayer& layer,
1530 const HalModel& model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001531 ConversionData& data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001532 const armnn::TensorInfo* overrideOutputInfo = nullptr,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001533 const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
1534 const ActivationFn& activationFunction = ActivationFn::kActivationNone)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001535{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001536 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1537 outputIndex,
1538 layer,
1539 outputIndex,
1540 model,
Finn Williamsfc884b42020-06-11 17:35:44 +01001541 data,
Finn Williamsa4983ce2020-07-23 12:55:12 +01001542 overrideOutputInfo,
Kevin Mayfcf2a152020-09-08 16:06:32 +01001543 validateFunc,
1544 activationFunction);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001545}
1546
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001547template<typename HalPolicy,
1548 typename HalOperation = typename HalPolicy::Operation,
1549 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001550bool ConvertToActivation(const HalOperation& operation,
1551 const char* operationName,
1552 const armnn::ActivationDescriptor& activationDesc,
1553 const HalModel& model,
1554 ConversionData& data)
1555{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001556 using HalOperand = typename HalPolicy::Operand;
1557
1558 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001559 if (!input.IsValid())
1560 {
1561 return Fail("%s: Input 0 is invalid", operationName);
1562 }
1563
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001564 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001565 if (!outputOperand)
1566 {
1567 return false;
1568 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001569
1570 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001571
1572 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001573
1574 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
1575 {
1576 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1577 IsActivationSupported,
1578 data.m_Backends,
1579 isSupported,
1580 input.GetTensorInfo(),
1581 outInfo,
1582 activationDesc);
1583 };
1584
1585 if(IsDynamicTensor(outInfo))
1586 {
1587 isSupported = AreDynamicTensorsSupported();
1588 }
1589 else
1590 {
1591 validateFunc(outInfo, isSupported);
1592 }
1593
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001594 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001595 {
1596 return false;
1597 }
1598
1599 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01001600 ARMNN_ASSERT(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001601 input.Connect(layer->GetInputSlot(0));
1602
Finn Williamsa4983ce2020-07-23 12:55:12 +01001603 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
arovir01b0717b52018-09-05 17:03:25 +01001604}
1605
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001606template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001607 typename HalOperation = typename HalPolicy::Operation,
1608 typename HalModel = typename HalPolicy::Model>
1609bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1610{
1611 armnn::ActivationDescriptor desc;
1612 desc.m_Function = armnn::ActivationFunction::ReLu;
1613
1614 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1615}
1616
1617template<typename HalPolicy,
1618 typename HalOperation = typename HalPolicy::Operation,
1619 typename HalModel = typename HalPolicy::Model>
1620bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1621{
1622 armnn::ActivationDescriptor desc;
1623 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1624 desc.m_A = 1.0f;
1625 desc.m_B = -1.0f;
1626
1627 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1628}
1629
1630template<typename HalPolicy,
1631 typename HalOperation = typename HalPolicy::Operation,
1632 typename HalModel = typename HalPolicy::Model>
1633bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1634{
1635 armnn::ActivationDescriptor desc;
1636 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1637 desc.m_A = 6.0f;
1638
1639 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1640}
1641
1642template<typename HalPolicy,
1643 typename HalOperation = typename HalPolicy::Operation,
1644 typename HalModel = typename HalPolicy::Model>
1645bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1646{
1647 armnn::ActivationDescriptor desc;
1648 desc.m_Function = armnn::ActivationFunction::TanH;
1649 desc.m_A = 1.0f; // android nn does not support tanH parameters
1650 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1651
1652 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1653}
1654
1655template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001656 typename HalOperation = typename HalPolicy::Operation,
1657 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001658bool ConvertPaddings(const HalOperation& operation,
1659 const HalModel& model,
1660 ConversionData& data,
1661 unsigned int rank,
1662 armnn::PadDescriptor& padDescriptor)
1663{
1664 using HalOperand = typename HalPolicy::Operand;
1665
1666 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1667 if (!paddingsOperand)
1668 {
1669 return Fail("%s: Could not read paddings operand", __func__);
1670 }
1671
1672 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1673 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1674 {
1675 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1676 }
1677
1678 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00001679 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
1680 {
1681 return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
1682 }
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001683
1684 // add padding for each dimension of input tensor.
1685 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1686 {
1687 int paddingBeforeInput = paddings[i];
1688 int paddingAfterInput = paddings[i + 1];
1689
1690 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1691 {
1692 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1693 }
1694
1695 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1696 }
1697
1698 return true;
1699}
1700
1701template<typename HalPolicy,
1702 typename HalOperation = typename HalPolicy::Operation,
1703 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001704bool ConvertPooling2d(const HalOperation& operation,
1705 const char* operationName,
1706 armnn::PoolingAlgorithm poolType,
1707 const HalModel& model,
1708 ConversionData& data)
1709{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001710 using HalOperand = typename HalPolicy::Operand;
1711 using HalOperandType = typename HalPolicy::OperandType;
1712
1713 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001714 if (!input.IsValid())
1715 {
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001716 return Fail("%s: Operation Could not read input 0", operationName);
arovir01b0717b52018-09-05 17:03:25 +01001717 }
1718
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001719 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001720 if (!output)
1721 {
1722 return Fail("%s: Could not read output 0", __func__);
1723 }
1724
1725 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1726 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1727
arovir01b0717b52018-09-05 17:03:25 +01001728 armnn::Pooling2dDescriptor desc;
1729 desc.m_PoolType = poolType;
1730 desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001731 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001732
1733 ActivationFn activation;
1734
Sadik Armagan15d63e22019-07-26 16:59:35 +01001735 auto inputSize = operation.inputs.size();
1736
1737 if (inputSize >= 10)
1738 {
1739 // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
1740 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1741 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1742 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1743 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1744 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1745 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1746 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1747 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1748 !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
1749 {
1750 return Fail("%s: Operation has invalid inputs", operationName);
1751 }
1752
Kevin May42477c12020-03-26 13:34:14 +00001753 if (Is12OrLaterOperand(*output))
Sadik Armagan15d63e22019-07-26 16:59:35 +01001754 {
1755 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
1756 }
1757 }
1758 else
arovir01b0717b52018-09-05 17:03:25 +01001759 {
1760 // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
1761 android::nn::PaddingScheme scheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001762 if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
1763 !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1764 !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1765 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
1766 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
1767 !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001768 {
1769 return Fail("%s: Operation has invalid inputs", operationName);
1770 }
1771
Kevin May42477c12020-03-26 13:34:14 +00001772 if (Is12OrLaterOperand(*output))
arovir01b0717b52018-09-05 17:03:25 +01001773 {
Sadik Armagan15d63e22019-07-26 16:59:35 +01001774 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001775 }
FinnWilliamsArm493e9b72019-11-25 16:02:07 +00001776
1777 const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
1778 const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
1779 const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
1780
1781 CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
1782 CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
arovir01b0717b52018-09-05 17:03:25 +01001783 }
1784
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001785 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001786
1787 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1788 {
1789 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1790 IsPooling2dSupported,
1791 data.m_Backends,
1792 isSupported,
1793 inputInfo,
1794 outputInfo,
1795 desc);
1796
1797 };
1798
1799 if(IsDynamicTensor(outputInfo))
1800 {
1801 isSupported = AreDynamicTensorsSupported();
1802 }
1803 else
1804 {
1805 validateFunc(outputInfo, isSupported);
1806 }
1807
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001808 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001809 {
Éanna Ó Catháin3d1059c2018-10-11 15:53:04 +01001810 return false;
arovir01b0717b52018-09-05 17:03:25 +01001811 }
arovir01b0717b52018-09-05 17:03:25 +01001812
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001813 armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
1814 if (!pooling2dLayer)
arovir01b0717b52018-09-05 17:03:25 +01001815 {
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001816 return Fail("%s: AddPooling2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001817 }
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001818
Matteo Martincigh39fc5472018-10-26 16:39:28 +01001819 input.Connect(pooling2dLayer->GetInputSlot(0));
1820
Finn Williamsa4983ce2020-07-23 12:55:12 +01001821 if (!isSupported)
1822 {
1823 return false;
1824 }
1825
Kevin Mayfcf2a152020-09-08 16:06:32 +01001826 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *pooling2dLayer, model,
1827 data, nullptr, validateFunc, activation);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001828}
1829
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001830template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001831 typename HalOperation = typename HalPolicy::Operation,
1832 typename HalModel = typename HalPolicy::Model>
1833bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001834{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001835 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001836
1837 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1838 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1839
1840 if (!input0.IsValid() || !input1.IsValid())
1841 {
1842 return Fail("%s: Operation has invalid inputs", __func__);
1843 }
1844
1845 // The FuseActivation parameter is always the input index 2
1846 // and it should be optional
1847 ActivationFn activationFunction;
1848 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1849 {
1850 return Fail("%s: Operation has invalid inputs", __func__);
1851 }
1852
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001853 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001854 if (!outputOperand)
1855 {
1856 return false;
1857 }
1858
1859 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1860 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1861
1862 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01001863
1864 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01001865 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1866 {
1867 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1868 IsAdditionSupported,
1869 data.m_Backends,
1870 isSupported,
1871 inputInfo0,
1872 inputInfo1,
1873 outputInfo);
1874 };
1875
1876 if(!IsDynamicTensor(outputInfo))
1877 {
1878 validateFunc(outputInfo, isSupported);
1879 }
1880 else
1881 {
1882 isSupported = AreDynamicTensorsSupported();
1883 }
1884
Mike Kelly46272802019-08-14 17:00:48 +01001885 if (!isSupported)
1886 {
1887 return false;
1888 }
1889
1890 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01001891
Kevin Mayfcf2a152020-09-08 16:06:32 +01001892 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1893 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01001894 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01001895 return false;
1896 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01001897
Kevin Mayfcf2a152020-09-08 16:06:32 +01001898 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
1899 data, nullptr, validateFunc, activationFunction);
1900
Mike Kelly46272802019-08-14 17:00:48 +01001901}
1902
1903template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001904 typename HalOperation = typename HalPolicy::Operation,
1905 typename HalModel = typename HalPolicy::Model>
1906bool ConvertArgMinMax(const HalOperation& operation,
1907 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001908 ConversionData& data,
1909 armnn::ArgMinMaxFunction argMinMaxFunction)
1910{
1911 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1912
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001913 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001914 using HalOperandType = typename HalPolicy::OperandType;
1915
1916 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1917
1918 if (!input0.IsValid())
1919 {
1920 return Fail("%s: Operation has invalid inputs", __func__);
1921 }
1922
1923 int32_t axis;
1924 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1925 {
1926 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1927 }
1928
1929 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1930 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1931
1932 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1933 {
1934 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1935 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1936 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1937 return Fail("%s: Axis must be in range [-n, n)", __func__);
1938 }
1939
1940 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1941 if (!output)
1942 {
1943 return Fail("%s: Could not read output 0", __func__);
1944 }
1945
1946 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1947
1948 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001949
1950 armnn::ArgMinMaxDescriptor descriptor;
1951 descriptor.m_Function = argMinMaxFunction;
1952 descriptor.m_Axis = axis;
1953
1954 bool isSupported = false;
Finn Williamsa4983ce2020-07-23 12:55:12 +01001955
1956 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1957 {
1958 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1959 IsArgMinMaxSupported,
1960 data.m_Backends,
1961 isSupported,
1962 inputInfo0,
1963 outputInfo,
1964 descriptor);
1965 };
1966
1967 if(IsDynamicTensor(outputInfo))
1968 {
1969 isSupported = AreDynamicTensorsSupported();
1970 }
1971 else
1972 {
1973 validateFunc(outputInfo, isSupported);
1974 }
1975
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001976 if (!isSupported)
1977 {
1978 return false;
1979 }
1980
1981 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1982 assert(layer != nullptr);
1983
1984 input0.Connect(layer->GetInputSlot(0));
1985
Finn Williamsa4983ce2020-07-23 12:55:12 +01001986 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001987}
1988
1989template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001990 typename HalOperation = typename HalPolicy::Operation,
1991 typename HalModel = typename HalPolicy::Model>
1992bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kellyb8805202019-07-31 17:25:43 +01001993{
Keith Davis6e4081f2020-09-03 13:17:21 +01001994 using HalOperand = typename HalPolicy::Operand;
Mike Kellyb8805202019-07-31 17:25:43 +01001995 using HalOperandType = typename HalPolicy::OperandType;
1996
1997 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1998 if (operation.inputs.size() <= 1)
1999 {
2000 return Fail("%s: Operation has insufficient arguments", __func__);
2001 }
2002
2003 // Get inputs and outputs
2004 const std::size_t numInputTensors = operation.inputs.size() - 1;
2005
2006 int32_t concatDim;
2007 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
2008 {
2009 return Fail("%s: Operation has invalid inputs", __func__);
2010 }
2011
2012 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2013 if (!outputOperand)
2014 {
2015 return Fail("%s: Operation has no outputs", __func__);
2016 }
2017
Keith Davis6e4081f2020-09-03 13:17:21 +01002018 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
2019 armnn::TensorShape outputShape = outputInfo.GetShape();
2020 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002021 //
2022 // handle negative concat dims along the lines of tensorflow as described here:
2023 // https://www.tensorflow.org/api_docs/python/tf/concat
2024 // "negative axis refers to axis + rank(values)-th dimension"
2025 //
2026 if (concatDim < 0)
2027 {
2028 concatDim += outputShape.GetNumDimensions();
2029 }
2030
2031 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
2032 {
2033 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
2034 }
2035
2036 std::vector<LayerInputHandle> inputHandles;
2037 std::vector<armnn::TensorShape> inputShapes;
2038
2039 inputHandles.reserve(numInputTensors);
2040 inputShapes.reserve(numInputTensors);
2041
Keith Davis6e4081f2020-09-03 13:17:21 +01002042 bool inputsHaveBeenReshaped = false;
2043 unsigned int tensorDimensionsAdded = 0;
Mike Kellyb8805202019-07-31 17:25:43 +01002044 for (uint32_t i = 0; i < numInputTensors; ++i)
2045 {
2046 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
2047 if (!operand)
2048 {
2049 return Fail("%s: Operation has invalid inputs", __func__);
2050 }
2051
Teresa Charlin3b959602019-10-31 17:05:47 +00002052 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
2053 if (!operandInputHandle.IsValid())
2054 {
2055 return Fail("%s: Operation has invalid inputs", __func__);
2056 }
Mike Kellyb8805202019-07-31 17:25:43 +01002057
Keith Davis6e4081f2020-09-03 13:17:21 +01002058 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Mike Kellyb8805202019-07-31 17:25:43 +01002059 if (operandShape.GetNumDimensions() == 0)
2060 {
2061 return Fail("%s: Operands with rank 0 are not supported", __func__);
2062 }
2063
2064 if (RequiresReshape(operandShape))
2065 {
2066 inputsHaveBeenReshaped = true;
2067
2068 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
2069
2070 // Expand the tensor to three dimensions
2071 if (operandShape.GetNumDimensions() == 2)
2072 {
2073 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
2074 tensorDimensionsAdded = 1;
2075 }
2076 else
2077 {
2078 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
2079 tensorDimensionsAdded = 2;
2080 }
2081
Kevin Mayaed08ac2019-12-12 16:33:31 +00002082 armnn::ReshapeDescriptor reshapeDescriptor;
2083 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
2084
2085 bool isSupported = false;
2086 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2087 IsReshapeSupported,
2088 data.m_Backends,
2089 isSupported,
2090 operandInputHandle.GetTensorInfo(),
2091 reshapeInfo,
2092 reshapeDescriptor);
Keith Davis6e4081f2020-09-03 13:17:21 +01002093
Kevin Mayaed08ac2019-12-12 16:33:31 +00002094 if (!isSupported)
2095 {
2096 return false;
2097 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002098 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002099
2100 // Point to the reshape operation rather then the input operation
Keith Davis6e4081f2020-09-03 13:17:21 +01002101 operandShape = reshapeInfo.GetShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002102 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
2103 }
2104
2105 inputShapes.emplace_back(operandShape);
2106 inputHandles.emplace_back(operandInputHandle);
2107
2108 if (!inputHandles.back().IsValid())
2109 {
2110 return Fail("%s: Operation has invalid inputs", __func__);
2111 }
2112 }
2113
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002114 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
Mike Kellyb8805202019-07-31 17:25:43 +01002115
2116 if (inputsHaveBeenReshaped)
2117 {
2118 // Adjust the concatenation dimension by the amount of dimensions added (if any)
2119 concatDim += tensorDimensionsAdded;
2120
2121 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
2122 if (tensorDimensionsAdded == 1)
2123 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002124 if (IsDynamicTensor(outputInfo))
2125 {
2126 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
2127 }
2128 else
2129 {
2130 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
2131 }
Mike Kellyb8805202019-07-31 17:25:43 +01002132 }
2133 else if (tensorDimensionsAdded == 2)
2134 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002135 if (IsDynamicTensor(outputInfo))
2136 {
2137 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
2138 }
2139 else
2140 {
2141 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
2142 }
Mike Kellyb8805202019-07-31 17:25:43 +01002143 }
2144 }
2145
2146 // Check if permutations is required and get the pair of permutations required for the concatenation.
2147 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
2148 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
Keith Davis6e4081f2020-09-03 13:17:21 +01002149 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
Keith Davis6e4081f2020-09-03 13:17:21 +01002150 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
2151 concatDim,
2152 permutationPair);
Mike Kellyb8805202019-07-31 17:25:43 +01002153
Keith Davis6e4081f2020-09-03 13:17:21 +01002154 // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
2155 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002156 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002157 if (needPermute)
2158 {
2159 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
2160 }
2161
2162 outputInfo.SetShape(outputShape);
Mike Kellyb8805202019-07-31 17:25:43 +01002163 }
Mike Kellyb8805202019-07-31 17:25:43 +01002164 // this is no-op for identity swizzles, otherwise it replaces both
2165 // the handles and shapes with the swizzled layer output handles and shapes
Teresa Charlin185f5882020-04-06 21:59:18 +01002166 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
Kevin Mayaed08ac2019-12-12 16:33:31 +00002167 {
2168 return false;
2169 }
Mike Kellyb8805202019-07-31 17:25:43 +01002170
2171 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
2172 armnn::OriginsDescriptor concatDescriptor;
2173
2174 try
2175 {
2176 // The concat descriptor is always created across the only supported concat dimension
2177 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002178 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
2179 inputShapes.end(),
2180 concatDim);
2181 } catch (std::exception& error)
Mike Kellyb8805202019-07-31 17:25:43 +01002182 {
2183 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
2184 }
2185
2186 // Validate the output shape is correct given the input shapes based on the
2187 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Keith Davis6e4081f2020-09-03 13:17:21 +01002188 if (!isDynamicTensor)
Mike Kellyb8805202019-07-31 17:25:43 +01002189 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002190 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
2191 {
2192 return Fail("%s: Error validating the output shape for concat", __func__);
2193 }
Mike Kellyb8805202019-07-31 17:25:43 +01002194 }
2195
2196 std::vector<const armnn::TensorInfo*> inputTensorInfos;
2197 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
Keith Davis6e4081f2020-09-03 13:17:21 +01002198 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Mike Kellyb8805202019-07-31 17:25:43 +01002199
Keith Davis6e4081f2020-09-03 13:17:21 +01002200 bool isSupported = false;
2201 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
2202 FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
2203 outputInfo, concatDescriptor);
2204 };
2205
2206 if (!isDynamicTensor)
2207 {
2208 validateFunc(outputInfo, isSupported);
2209 }
2210 else
2211 {
2212 isSupported = AreDynamicTensorsSupported();
2213 }
2214
Mike Kellyb8805202019-07-31 17:25:43 +01002215 if (!isSupported)
2216 {
2217 return false;
2218 }
2219
2220 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
2221 assert(layer != nullptr);
2222 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
Mike Kellyb8805202019-07-31 17:25:43 +01002223 // Connect inputs to the layer
2224 const int numInputSlots = layer->GetNumInputSlots();
2225 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
2226 for (int i = 0; i < numInputSlots; ++i)
2227 {
2228 // connect the input directly to the merge (concat) layer
2229 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
2230 }
2231
Keith Davis6e4081f2020-09-03 13:17:21 +01002232 // Transpose the output shape
2233 auto transposeOutputShape = [&](){
Mike Kelly4a956582020-02-28 10:32:09 +00002234 armnn::TransposeDescriptor transposeDesc;
2235 transposeDesc.m_DimMappings = permutationPair.second;
Teresa Charlin185f5882020-04-06 21:59:18 +01002236 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
2237 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
2238 permutationPair.second);
Keith Davis6e4081f2020-09-03 13:17:21 +01002239 isSupported = false;
Kevin Mayaed08ac2019-12-12 16:33:31 +00002240 FORWARD_LAYER_SUPPORT_FUNC(__func__,
Mike Kelly4a956582020-02-28 10:32:09 +00002241 IsTransposeSupported,
Kevin Mayaed08ac2019-12-12 16:33:31 +00002242 data.m_Backends,
2243 isSupported,
Teresa Charlin185f5882020-04-06 21:59:18 +01002244 inputTransposeInfo,
2245 outputTransposeInfo,
Mike Kelly4a956582020-02-28 10:32:09 +00002246 transposeDesc);
Kevin Mayaed08ac2019-12-12 16:33:31 +00002247 if (!isSupported)
2248 {
2249 return false;
2250 }
Mike Kellyb8805202019-07-31 17:25:43 +01002251 // Add permutation layer and connect the output to it, the permutation becomes the output layer
Keith Davis6e4081f2020-09-03 13:17:21 +01002252 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
Mike Kelly4a956582020-02-28 10:32:09 +00002253 permutationPair.second);
Mike Kellyb8805202019-07-31 17:25:43 +01002254 layer = &deswizzleLayer;
Keith Davis6e4081f2020-09-03 13:17:21 +01002255
2256 return true;
2257 };
2258
2259 if (needPermute && !isDynamicTensor)
2260 {
2261 transposeOutputShape();
Mike Kellyb8805202019-07-31 17:25:43 +01002262 }
2263
2264 if (inputsHaveBeenReshaped)
2265 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002266 if (isDynamicTensor)
2267 {
2268 // Infer the output shapes of concat if outputs are type 1 dynamic
David Monahan7f492ac2020-10-16 10:36:29 +01002269 ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
Keith Davis6e4081f2020-09-03 13:17:21 +01002270 if (!ValidateConcatOutputShape(inputShapes,
2271 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
2272 concatDim))
2273 {
2274 return Fail("%s: Error validating the output shape for concat", __func__);
2275 }
2276 transposeOutputShape();
2277 }
Mike Kellyb8805202019-07-31 17:25:43 +01002278
Mike Kellyb8805202019-07-31 17:25:43 +01002279 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Mike Kellyb8805202019-07-31 17:25:43 +01002280 // Undo the reshape knowing the amount of dimensions added
2281 if (tensorDimensionsAdded == 1)
2282 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002283 afterConcatInfo.SetShape(
2284 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002285 }
2286 else if (tensorDimensionsAdded == 2)
2287 {
Keith Davis6e4081f2020-09-03 13:17:21 +01002288 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
Mike Kellyb8805202019-07-31 17:25:43 +01002289 }
2290
Kevin Mayaed08ac2019-12-12 16:33:31 +00002291 armnn::ReshapeDescriptor reshapeDescriptor;
2292 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
Keith Davis6e4081f2020-09-03 13:17:21 +01002293 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
Kevin Mayaed08ac2019-12-12 16:33:31 +00002294
Keith Davis6e4081f2020-09-03 13:17:21 +01002295 isSupported = false;
2296 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
2297 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2298 IsReshapeSupported,
2299 data.m_Backends,
2300 isSupported,
2301 concatInfo,
2302 afterConcatInfo,
2303 reshapeDescriptor);
2304 };
2305
2306 if (!IsDynamicTensor(afterConcatInfo))
2307 {
2308 validateReshapeFunc(afterConcatInfo, isSupported);
2309 }
2310 else
2311 {
2312 isSupported = AreDynamicTensorsSupported();
2313 }
2314
Kevin Mayaed08ac2019-12-12 16:33:31 +00002315 if (!isSupported)
2316 {
2317 return false;
2318 }
Keith Davis6e4081f2020-09-03 13:17:21 +01002319 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
2320 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
2321 0,
2322 *layer,
2323 model,
2324 data,
2325 nullptr,
2326 validateReshapeFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002327 }
2328
Keith Davis6e4081f2020-09-03 13:17:21 +01002329 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kellyb8805202019-07-31 17:25:43 +01002330}
2331
/// Converts an android.nn CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Two HAL input layouts are accepted:
///   - 10 inputs: explicit padding (pad left/right/top/bottom, strides, activation)
///   - 7 inputs:  implicit padding scheme, from which padding is computed here
///
/// @param operation the HAL operation to convert
/// @param model     the HAL model containing the operation's operands
/// @param data      conversion state (network under construction, backends, handles)
/// @return true if the layer was added and its output tracked; false (via Fail) otherwise
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias,
    // so both are pinned here as constant tensors.
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales where required.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding form: inputs 3-8 are the four pads plus strides,
        // input 9 the fused activation function.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding form: input 3 is a padding scheme; padding amounts are
        // derived below from kernel/input extents and strides.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel extents come from weight dims 2 (width) and 1 (height); input extents from
        // the NHWC input shape. NOTE(review): assumes NNAPI weight layout [ outDepth, H, W, inDepth ].
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    // Backend support check; deferred via validateFunc when the output shape is dynamic.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Registers the output slot and applies the fused activation (if any).
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2453
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002454template<typename HalPolicy,
2455 typename HalOperation = typename HalPolicy::Operation,
2456 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002457bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2458{
2459 using HalOperand = typename HalPolicy::Operand;
2460 using HalOperandType = typename HalPolicy::OperandType;
2461
2462 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2463 if (!input.IsValid() )
2464 {
2465 return Fail("%s: Operation has invalid inputs", __func__);
2466 }
2467
2468 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2469 unsigned int rank = inputInfo.GetNumDimensions();
2470 if (rank != 4)
2471 {
2472 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2473 }
2474
2475 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2476 if (!output)
2477 {
2478 return Fail("%s: Could not read output 0", __func__);
2479 }
2480
2481 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002482
2483 armnn::DepthToSpaceDescriptor descriptor;
2484
2485 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2486 if (descriptor.m_BlockSize <= 1)
2487 {
2488 return Fail("%s: Block size must be at least 1 in all dimensions");
2489 }
2490
2491 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
Kevin May42477c12020-03-26 13:34:14 +00002492 if (Is12OrLaterOperand(*output))
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002493 {
2494 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2495 }
2496
2497 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002498 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2499 {
2500 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2501 IsDepthToSpaceSupported,
2502 data.m_Backends,
2503 isSupported,
2504 inputInfo,
2505 outputInfo,
2506 descriptor);
2507 };
2508
2509 if(!IsDynamicTensor(outputInfo))
2510 {
2511 validateFunc(outputInfo, isSupported);
2512 }
2513 else
2514 {
2515 isSupported = AreDynamicTensorsSupported();
2516 }
2517
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002518 if (!isSupported)
2519 {
2520 return false;
2521 }
2522
2523 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2524 assert(layer != nullptr);
2525 input.Connect(layer->GetInputSlot(0));
2526
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002527 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002528}
2529
/// Converts an android.nn DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Two HAL input layouts are accepted:
///   - 11 inputs: explicit padding (pad left/right/top/bottom, strides, depth
///     multiplier at index 9 — not read here — and activation at index 10)
///   - 8 inputs:  implicit padding scheme, from which padding is computed here
///
/// @param operation the HAL operation to convert
/// @param model     the HAL model containing the operation's operands
/// @param data      conversion state (network under construction, backends, handles)
/// @return true if the layer was added and its output tracked; false (via Fail) otherwise
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels from the NHWC input shape; M = depth multiplier,
    //  recovered as totalOutputChannels / inputChannels).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    // Pin the weights as a constant tensor, applying the reshape + permutation above.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales where required.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding form.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding form: derive padding from kernel/input extents below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights were permuted to [ M, I, H, W ], so W is dim 3 and H is dim 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    // Backend support check; deferred via validateFunc when the output shape is dynamic.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weights.GetInfo(),
                                   biases);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }


    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Registers the output slot and applies the fused activation (if any).
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activation);
}
2678
Mike Kelly3c673942019-07-25 09:26:06 +01002679template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002680 typename HalOperation = typename HalPolicy::Operation,
2681 typename HalModel = typename HalPolicy::Model>
2682bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002683{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002684 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002685
2686 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2687 if (!input.IsValid())
2688 {
2689 return Fail("%s: Operation has invalid input", __func__);
2690 }
2691
Sadik Armagan98c0f662019-11-21 15:54:36 +00002692 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2693 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2694 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2695 {
2696 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2697 }
2698
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002699 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002700 if (!outputOperand)
2701 {
2702 return Fail("%s: Operation has invalid outputs", __func__);
2703 }
2704
2705 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002706
2707 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002708 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2709 {
2710 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2711 IsDequantizeSupported,
2712 data.m_Backends,
2713 isSupported,
2714 inputInfo,
2715 outputInfo);
2716 };
2717
2718 if(IsDynamicTensor(outputInfo))
2719 {
2720 isSupported = AreDynamicTensorsSupported();
2721 }
2722 else
2723 {
2724 validateFunc(outputInfo, isSupported);
2725 }
2726
Mike Kelly46272802019-08-14 17:00:48 +01002727 if (!isSupported)
2728 {
2729 return false;
2730 }
2731
2732 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2733 assert(layer != nullptr);
2734 input.Connect(layer->GetInputSlot(0));
2735
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002736 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002737}
2738
2739template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002740 typename HalOperation = typename HalPolicy::Operation,
2741 typename HalModel = typename HalPolicy::Model>
2742bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002743{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002744 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002745
2746 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2747 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2748
2749 if (!input0.IsValid() || !input1.IsValid())
2750 {
2751 return Fail("%s: Operation has invalid inputs", __func__);
2752 }
2753
2754 // The FuseActivation parameter is always the input index 2
2755 // and it should be optional
2756 ActivationFn activationFunction;
2757 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2758 {
2759 return Fail("%s: Operation has invalid inputs", __func__);
2760 }
2761
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002762 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002763 if (!output)
2764 {
2765 return Fail("%s: Could not read output 0", __func__);
2766 }
2767
2768 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01002769
2770 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002771 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2772 {
2773 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2774 IsDivisionSupported,
2775 data.m_Backends,
2776 isSupported,
2777 input0.GetTensorInfo(),
2778 input1.GetTensorInfo(),
2779 outputInfo);
2780 };
2781
2782 if(!IsDynamicTensor(outputInfo))
2783 {
2784 validateFunc(outputInfo, isSupported);
2785 }
2786 else
2787 {
2788 isSupported = AreDynamicTensorsSupported();
2789 }
2790
Mike Kelly46272802019-08-14 17:00:48 +01002791 if (!isSupported)
2792 {
2793 return false;
2794 }
2795
2796 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Mike Kelly46272802019-08-14 17:00:48 +01002797
Kevin Mayfcf2a152020-09-08 16:06:32 +01002798 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2799 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01002800 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01002801 return false;
Mike Kelly46272802019-08-14 17:00:48 +01002802 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01002803
2804 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
2805 data, nullptr, validateFunc, activationFunction);
2806
Mike Kelly46272802019-08-14 17:00:48 +01002807}
2808
2809template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002810 typename HalOperation = typename HalPolicy::Operation,
2811 typename HalModel = typename HalPolicy::Model>
2812bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002813{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002814 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002815
2816 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2817 if (!input.IsValid())
2818 {
2819 return Fail("%s: Operation has invalid inputs", __func__);
2820 }
2821
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002822 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002823 if (!outputOperand)
2824 {
2825 return Fail("%s: Operation has invalid outputs", __func__);
2826 }
2827
2828 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01002829
2830 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002831 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2832 {
2833 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2834 IsFloorSupported,
2835 data.m_Backends,
2836 isSupported,
2837 input.GetTensorInfo(),
2838 outputInfo);
2839 };
2840
2841 if(!IsDynamicTensor(outputInfo))
2842 {
2843 validateFunc(outputInfo, isSupported);
2844 }
2845 else
2846 {
2847 isSupported = AreDynamicTensorsSupported();
2848 }
2849
Mike Kelly46272802019-08-14 17:00:48 +01002850 if (!isSupported)
2851 {
2852 return false;
2853 }
2854
2855 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2856 assert(layer != nullptr);
2857 input.Connect(layer->GetInputSlot(0));
2858
Teresa Charlin4bd9a742020-08-12 12:58:50 +01002859 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01002860}
2861
/// V1_0 overload: always false — this overload unconditionally reports that a
/// V1_0 operand is not QSYMM8 (the V1_2/V1_3 overloads below do the real check).
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2866
Kevin May42477c12020-03-26 13:34:14 +00002867#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002868
/// Returns true if the given V1_2 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}
2873
2874#endif
2875
Kevin May42477c12020-03-26 13:34:14 +00002876#ifdef ARMNN_ANDROID_NN_V1_3
2877
/// Returns true if the given V1_3 operand is an 8-bit symmetric quantized tensor.
inline bool IsQSymm8(const V1_3::Operand& operand)
{
    return operand.type == V1_3::OperandType::TENSOR_QUANT8_SYMM;
}
2882
2883#endif
2884
/// Outcome of DequantizeIfRequired (below).
enum class DequantizeStatus
{
    SUCCESS,         // Weights were produced by a DEQUANTIZE op and were dequantized here.
    NOT_REQUIRED,    // Weights are already constant; no dequantization needed.
    INVALID_OPERAND  // The weights operand could not be read.
};

/// Result of DequantizeIfRequired: (dequantized float buffer — null when not required,
/// buffer length in elements, tensor info describing the buffer, status code).
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2893
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002894template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002895 typename HalOperation = typename HalPolicy::Operation,
2896 typename HalModel = typename HalPolicy::Model>
2897DequantizeResult DequantizeIfRequired(size_t operand_index,
2898 const HalOperation& operation,
2899 const HalModel& model,
2900 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002901{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002902 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002903
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002904 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002905 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002906 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002907 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002908 }
2909
2910 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2911 {
2912 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002913 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002914 }
2915
2916 const size_t weightsInputIndex = operation.inputs[operand_index];
2917
2918 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2919 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
Kevin May42477c12020-03-26 13:34:14 +00002920 for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002921 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002922 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Kevin May42477c12020-03-26 13:34:14 +00002923 const auto& operationIt = getMainModel(model).operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002924 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2925 {
2926 continue;
2927 }
2928
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002929 size_t outOpIndex = weightsInputIndex + 1;
2930 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002931 {
2932 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002933 }
2934
2935 if (outOpIndex != weightsInputIndex)
2936 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002937 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002938 }
2939
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002940 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002941 ARMNN_ASSERT(operand);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002942
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002943 if (!IsQSymm8(*operand))
2944 {
2945 // Only supporting dequantize from QSYMM8 to FLOAT
2946 break;
2947 }
2948
2949 // Allocate a new buffer for the dequantized data and manually dequantize
2950 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2951 if (!startValue)
2952 {
2953 // Failed to get the operand address
2954 break;
2955 }
2956
2957 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2958 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002959 const float quantizationScale = operand->scale;
2960
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002961 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2962 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2963 {
2964 float* dstPtr = dequantizedBuffer.get();
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +01002965 ARMNN_ASSERT(dstPtr);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002966 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2967 }
2968
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002969 // Construct tensor info for dequantized ConstTensor
2970 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2971 operand->dimensions.data(),
2972 armnn::DataType::Float32);
2973
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002974 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2975 std::move(tensorInfo),
2976 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002977 }
2978
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002979 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002980}
2981
2982template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002983 typename HalOperation = typename HalPolicy::Operation,
2984 typename HalModel = typename HalPolicy::Model>
2985ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2986 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002987 const ConversionData& data,
2988 size_t operandIndex,
2989 bool optional = false)
2990{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002991 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2992
2993 DequantizeStatus status = std::get<3>(dequantized);
2994 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002995 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002996 case DequantizeStatus::INVALID_OPERAND:
2997 {
2998 // return invalid const tensor pin
2999 return ConstTensorPin();
3000 }
3001 case DequantizeStatus::NOT_REQUIRED:
3002 {
3003 return ConvertOperationInputToConstTensorPin<HalPolicy>(
3004 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
3005 }
3006 case DequantizeStatus::SUCCESS:
3007 default:
3008 {
3009 return ConstTensorPin(
3010 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
3011 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003012 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01003013}
3014
3015
// Converts an ANEURALNETWORKS_FULLY_CONNECTED operation into an ArmNN FullyConnected layer.
// Inputs: 0 = input tensor, 1 = weights (possibly behind a DEQUANTIZE op), 2 = 1D bias,
// 3 = fused activation function. Inputs of rank > 2 are flattened via a Reshape layer first.
// Returns false (via Fail) on any invalid/unsupported operand.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Weights may be the output of a DEQUANTIZE op; dequantize them to a constant if so.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape to 2D as required by FullyConnected; throws on incompatible shapes.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    // validateFunc is also handed to SetupAndTrackLayerOutputSlot below so dynamic-output
    // tensors can be re-validated once their shape is known; it captures locals by reference.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weights.GetInfo().GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   reshapedInfo,
                                   outputInfo,
                                   weights.GetInfo(),
                                   bias.GetInfo(),
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        // Defer validation until the output shape has been inferred.
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (inputInfo.GetNumDimensions() > 2U)
    {
        // Input rank > 2: insert a Reshape layer to flatten it before the FullyConnected layer.
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
                                                   data, nullptr, validateFunc, activationFunction);
}
3137
3138template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003139 typename HalOperation = typename HalPolicy::Operation,
3140 typename HalModel = typename HalPolicy::Model>
3141bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003142{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003143 using HalOperand = typename HalPolicy::Operand;
3144
Mike Kelly999e2092019-08-15 10:46:46 +01003145 if (operation.inputs.size() != 1)
3146 {
3147 return Fail("%s: Optional inputs are not supported", __func__);
3148 }
3149
Mike Kelly46272802019-08-14 17:00:48 +01003150 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3151 if (!input.IsValid())
3152 {
3153 return Fail("%s: Operation has invalid inputs", __func__);
3154 }
3155
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003156 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003157 if (!output)
3158 {
3159 return Fail("%s: Could not read output 0", __func__);
3160 }
3161
3162 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3163 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3164
Mike Kelly46272802019-08-14 17:00:48 +01003165 if (outputInfo.GetNumDimensions() != 4u)
3166 {
3167 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3168 }
3169
3170 armnn::L2NormalizationDescriptor desc;
3171 desc.m_DataLayout = armnn::DataLayout::NHWC;
3172
3173 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003174 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3175 {
3176 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3177 IsL2NormalizationSupported,
3178 data.m_Backends,
3179 isSupported,
3180 inputInfo,
3181 outputInfo,
3182 desc);
3183 };
3184
3185 if(!IsDynamicTensor(outputInfo))
3186 {
3187 validateFunc(outputInfo, isSupported);
3188 }
3189 else
3190 {
3191 isSupported = AreDynamicTensorsSupported();
3192 }
3193
Mike Kelly46272802019-08-14 17:00:48 +01003194 if (!isSupported)
3195 {
3196 return false;
3197 }
3198
3199 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
3200 assert(layer != nullptr);
3201 input.Connect(layer->GetInputSlot(0));
3202
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003203 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003204}
3205
3206template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003207 typename HalOperation = typename HalPolicy::Operation,
3208 typename HalModel = typename HalPolicy::Model>
3209bool ConvertLocalResponseNormalization(const HalOperation& operation,
3210 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01003211 ConversionData& data)
3212{
Mike Kelly999e2092019-08-15 10:46:46 +01003213 if (operation.inputs.size() != 5)
3214 {
3215 return Fail("%s: Optional inputs are not supported", __func__);
3216 }
3217
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003218 using HalOperand = typename HalPolicy::Operand;
3219 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01003220
3221 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3222 if (!input.IsValid())
3223 {
3224 return Fail("%s: Operation has invalid inputs", __func__);
3225 }
3226
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003227 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003228 if (!output)
3229 {
3230 return Fail("%s: Could not read output 0", __func__);
3231 }
3232
3233 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3234 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3235
Mike Kelly46272802019-08-14 17:00:48 +01003236 if (outputInfo.GetNumDimensions() != 4u)
3237 {
3238 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
3239 }
3240
3241 armnn::NormalizationDescriptor descriptor;
3242 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3243 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
3244 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
3245
3246 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003247 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01003248 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
3249 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
3250 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
3251 {
3252 return Fail("%s: Operation has invalid inputs", __func__);
3253 }
3254
3255 // ArmNN expects normSize to be the full size of the normalization
3256 // window rather than the radius as in AndroidNN.
3257 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
3258
3259 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003260 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3261 {
3262 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3263 IsNormalizationSupported,
3264 data.m_Backends,
3265 isSupported,
3266 inputInfo,
3267 outputInfo,
3268 descriptor);
3269 };
3270
3271 if(!IsDynamicTensor(outputInfo))
3272 {
3273 validateFunc(outputInfo, isSupported);
3274 }
3275 else
3276 {
3277 isSupported = AreDynamicTensorsSupported();
3278 }
3279
Mike Kelly46272802019-08-14 17:00:48 +01003280 if (!isSupported)
3281 {
3282 return false;
3283 }
3284
3285
3286 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
3287 assert(layer != nullptr);
3288 input.Connect(layer->GetInputSlot(0));
3289
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003290 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003291}
3292
3293template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003294 typename HalOperation = typename HalPolicy::Operation,
3295 typename HalModel = typename HalPolicy::Model>
3296bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003297{
Mike Kelly46272802019-08-14 17:00:48 +01003298 armnn::ActivationDescriptor desc;
3299 desc.m_Function = armnn::ActivationFunction::Sigmoid;
3300
3301 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
3302}
3303
3304template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003305 typename HalOperation = typename HalPolicy::Operation,
3306 typename HalModel = typename HalPolicy::Model>
3307bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003308{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003309 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003310
3311 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3312 if (!input.IsValid())
3313 {
3314 return Fail("%s: Operation has invalid inputs", __func__);
3315 }
3316
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003317 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003318 if (!output)
3319 {
3320 return Fail("%s: Could not read output 0", __func__);
3321 }
3322
3323 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003324
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003325 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kelly46272802019-08-14 17:00:48 +01003326 if (!axisOperand)
3327 {
3328 return Fail("%s: Could not read input 1", __func__);
3329 }
3330
3331 std::vector<int32_t> axis;
3332 if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
3333 {
3334 return Fail("%s: Input 1 has invalid values", __func__);
3335 }
3336
3337 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3338
3339 // Convert the axis to unsigned int and remove duplicates.
3340 unsigned int rank = inputInfo.GetNumDimensions();
3341 std::set<unsigned int> uniqueAxis;
3342 std::transform(axis.begin(), axis.end(),
3343 std::inserter(uniqueAxis, uniqueAxis.begin()),
3344 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3345
3346 // Get the "keep dims" flag.
3347 int32_t keepDims = 0;
3348 if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
3349 {
3350 return Fail("%s: Could not read input 2", __func__);
3351 }
3352
3353 armnn::MeanDescriptor descriptor;
3354 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3355 descriptor.m_KeepDims = keepDims > 0;
3356
3357 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003358 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3359 {
3360 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3361 IsMeanSupported,
3362 data.m_Backends,
3363 isSupported,
3364 inputInfo,
3365 outputInfo,
3366 descriptor);
3367 };
3368
3369 if(!IsDynamicTensor(outputInfo))
3370 {
3371 validateFunc(outputInfo, isSupported);
3372 }
3373 else
3374 {
3375 isSupported = AreDynamicTensorsSupported();
3376 }
3377
Mike Kelly46272802019-08-14 17:00:48 +01003378 if (!isSupported)
3379 {
3380 return false;
3381 }
3382
3383 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3384 assert(layer != nullptr);
3385 input.Connect(layer->GetInputSlot(0));
3386
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003387 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003388}
3389
3390template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003391 typename HalOperation = typename HalPolicy::Operation,
3392 typename HalModel = typename HalPolicy::Model>
3393bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003394{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003395 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003396
3397 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3398 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3399
3400 if (!input0.IsValid() || !input1.IsValid())
3401 {
3402 return Fail("%s: Operation has invalid inputs", __func__);
3403 }
3404
3405 // The FuseActivation parameter is always the input index 2
3406 // and it should be optional
3407 ActivationFn activationFunction;
3408 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3409 {
3410 return Fail("%s: Operation has invalid inputs", __func__);
3411 }
3412
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003413 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003414
3415 if (outputOperand == nullptr)
3416 {
3417 return false;
3418 }
3419
3420 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Mike Kelly46272802019-08-14 17:00:48 +01003421
3422 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003423 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3424 {
3425 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3426 IsMultiplicationSupported,
3427 data.m_Backends,
3428 isSupported,
3429 input0.GetTensorInfo(),
3430 input1.GetTensorInfo(),
3431 outputInfo);
3432 };
3433
3434 if(!IsDynamicTensor(outputInfo))
3435 {
3436 validateFunc(outputInfo, isSupported);
3437 }
3438 else
3439 {
3440 isSupported = AreDynamicTensorsSupported();
3441 }
3442
Mike Kelly46272802019-08-14 17:00:48 +01003443 if (!isSupported)
3444 {
3445 return false;
3446 }
3447
3448 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
Mike Kelly46272802019-08-14 17:00:48 +01003449
3450 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3451 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3452
Kevin Mayfcf2a152020-09-08 16:06:32 +01003453 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3454 if (!isReshapeSupported)
Mike Kelly46272802019-08-14 17:00:48 +01003455 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003456 return false;
3457 }
Sadik Armagan64b19b52019-08-19 09:49:58 +01003458
Kevin Mayfcf2a152020-09-08 16:06:32 +01003459 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3460 data, nullptr, validateFunc, activationFunction);
Mike Kelly46272802019-08-14 17:00:48 +01003461}
3462
3463template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003464 typename HalOperation = typename HalPolicy::Operation,
3465 typename HalModel = typename HalPolicy::Model>
3466bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003467{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003468 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003469
Mike Kelly3c673942019-07-25 09:26:06 +01003470 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3471 if (!input.IsValid())
3472 {
3473 return Fail("%s: Operation has invalid inputs", __func__);
3474 }
3475
3476 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3477 unsigned int rank = inputInfo.GetNumDimensions();
3478
3479 armnn::PadDescriptor descriptor;
3480 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
3481 {
3482 return Fail("%s: Could not convert paddings", __func__);
3483 }
3484
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003485 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3486 // the scale and zeroPoint must be the same as input0
Mike Kelly3c673942019-07-25 09:26:06 +01003487 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3488 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3489 // (QuantizationOffset - QuantizationOffset) * scale = 0.
Sadik Armagan7b9ce8d2020-04-21 10:39:28 +01003490 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
Mike Kelly3c673942019-07-25 09:26:06 +01003491 {
3492 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3493 }
3494
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003495 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01003496 if (!output)
3497 {
3498 return Fail("%s: Could not read output", __func__);
3499 }
3500
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01003501 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01003502
3503 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003504 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3505 {
3506 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3507 IsPadSupported,
3508 data.m_Backends,
3509 isSupported,
3510 inputInfo,
3511 outputInfo,
3512 descriptor);
3513 };
3514
3515 if(!IsDynamicTensor(outputInfo))
3516 {
3517 validateFunc(outputInfo, isSupported);
3518 }
3519 else
3520 {
3521 isSupported = AreDynamicTensorsSupported();
3522 }
3523
Mike Kelly3c673942019-07-25 09:26:06 +01003524 if (!isSupported)
3525 {
3526 return false;
3527 }
3528
3529 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3530 assert(layer != nullptr);
3531 input.Connect(layer->GetInputSlot(0));
Mike Kelly3c673942019-07-25 09:26:06 +01003532
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003533 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly3c673942019-07-25 09:26:06 +01003534}
3535
Mike Kelly0a879362019-07-29 16:56:31 +01003536template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003537 typename HalOperation = typename HalPolicy::Operation,
3538 typename HalModel = typename HalPolicy::Model>
3539bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003540{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003541 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003542
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003543 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3544 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3545 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003546
3547 if (inputOperand == nullptr
3548 || requestedShapeOperand == nullptr
3549 || outputOperand == nullptr)
3550 {
3551 return Fail("%s: Operation has invalid inputs", __func__);
3552 }
3553
3554 if (requestedShapeOperand->dimensions.size() != 1)
3555 {
3556 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3557 __func__, requestedShapeOperand->dimensions.size());
3558 }
3559
3560 std::vector<int32_t> targetDimensions;
3561 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3562 {
3563 return Fail("%s: Could not read values of input 1", __func__);
3564 }
3565
3566 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3567
3568 Shape requestedShape;
3569 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3570 // function that resolves these values into a fully specified tensor shape.
3571 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3572 {
3573 return Fail("%s: Failed to resolve the requested shape", __func__);
3574 }
3575
Mike Kelly46272802019-08-14 17:00:48 +01003576 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3577 if (!input.IsValid())
3578 {
3579 return Fail("%s: Could not read input 0", __func__);
3580 }
3581
3582 armnn::ReshapeDescriptor reshapeDescriptor;
3583 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3584 requestedShape.dimensions.data());
3585
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003586 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3587
Mike Kelly46272802019-08-14 17:00:48 +01003588 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003589 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3590 {
3591 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3592 IsReshapeSupported,
3593 data.m_Backends,
3594 isSupported,
3595 input.GetTensorInfo(),
3596 outputInfo,
3597 reshapeDescriptor);
3598 };
3599
3600 if(!IsDynamicTensor(outputInfo))
3601 {
3602 validateFunc(outputInfo, isSupported);
3603 }
3604 else
3605 {
3606 isSupported = AreDynamicTensorsSupported();
3607 }
3608
Mike Kelly46272802019-08-14 17:00:48 +01003609 if (!isSupported)
3610 {
3611 return false;
3612 }
3613
3614 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3615 assert(layer != nullptr);
3616 input.Connect(layer->GetInputSlot(0));
3617
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003618 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003619}
3620
3621template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003622 typename HalOperation = typename HalPolicy::Operation,
3623 typename HalModel = typename HalPolicy::Model>
3624bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003625{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003626 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003627
Mike Kelly0a879362019-07-29 16:56:31 +01003628 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3629 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3630
3631 if (!input0.IsValid() || !input1.IsValid())
3632 {
3633 return Fail("%s: Operation has invalid inputs", __func__);
3634 }
3635
3636 // The FuseActivation parameter is always the input index 2
3637 // and it should be optional
3638 ActivationFn activationFunction;
3639 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3640 {
3641 return Fail("%s: Operation has invalid inputs", __func__);
3642 }
3643
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003644 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003645 if (!output)
3646 {
3647 return Fail("%s: Could not read output 0", __func__);
3648 }
3649
3650 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly0a879362019-07-29 16:56:31 +01003651
3652 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003653 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3654 {
3655 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3656 IsSubtractionSupported,
3657 data.m_Backends,
3658 isSupported,
3659 input0.GetTensorInfo(),
3660 input1.GetTensorInfo(),
3661 outputInfo);
3662 };
3663
3664 if(IsDynamicTensor(outputInfo))
3665 {
3666 isSupported = AreDynamicTensorsSupported();
3667 }
3668 else
3669 {
3670 validateFunc(outputInfo, isSupported);
3671 }
3672
Mike Kelly0a879362019-07-29 16:56:31 +01003673 if (!isSupported)
3674 {
3675 return false;
3676 }
3677
3678 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
Mike Kelly0a879362019-07-29 16:56:31 +01003679
3680 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3681 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3682
Kevin Mayfcf2a152020-09-08 16:06:32 +01003683 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3684 if (!isReshapeSupported)
Mike Kelly0a879362019-07-29 16:56:31 +01003685 {
Kevin Mayfcf2a152020-09-08 16:06:32 +01003686 return false;
Mike Kelly0a879362019-07-29 16:56:31 +01003687 }
Kevin Mayfcf2a152020-09-08 16:06:32 +01003688 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *startLayer, model,
3689 data, nullptr, validateFunc, activationFunction);
Mike Kelly0a879362019-07-29 16:56:31 +01003690}
3691
Finn Williams23b87b32019-07-30 11:44:05 +01003692template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003693 typename HalOperation = typename HalPolicy::Operation,
3694 typename HalModel = typename HalPolicy::Model>
3695bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003696{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003697 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003698
3699 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3700 if (!input.IsValid())
3701 {
3702 return Fail("%s: Operation has invalid inputs", __func__);
3703 }
3704
3705 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3706 unsigned int rank = inputInfo.GetNumDimensions();
3707 if (rank > 4)
3708 {
3709 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3710 }
3711
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003712 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003713 if (!output)
3714 {
3715 return Fail("%s: Could not read output 0", __func__);
3716 }
3717
Sadik Armagan346e8112020-09-02 09:55:14 +01003718 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
Mike Kelly46272802019-08-14 17:00:48 +01003719 {
3720 return Fail("%s: Dynamic output tensors are not supported", __func__);
3721 }
3722
3723 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3724 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003725 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003726
3727 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3728
3729 std::vector<int32_t> axis;
3730 if (!axisOperand)
3731 {
3732 axis.assign(dimensionSequence,
3733 dimensionSequence + rank);
3734 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003735 else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003736 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003737 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003738 }
3739
3740 std::vector<uint32_t> outputDims;
3741 for (unsigned int i = 0; i < rank; i++)
3742 {
3743 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3744 auto currentDimension = inputInfo.GetShape()[i];
3745 if (skipSqueeze || currentDimension != 1)
3746 {
3747 outputDims.push_back(currentDimension);
3748 }
3749 }
3750
3751 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3752
3753 armnn::TensorInfo outputInfo = inputInfo;
3754 outputInfo.SetShape(outShape);
3755
3756 armnn::ReshapeDescriptor reshapeDesc;
3757 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3758
3759 bool isSupported = false;
3760 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3761 IsReshapeSupported,
3762 data.m_Backends,
3763 isSupported,
3764 inputInfo,
Kevin Mayaed08ac2019-12-12 16:33:31 +00003765 outputInfo,
Mike Kelly46272802019-08-14 17:00:48 +01003766 reshapeDesc);
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003767
Mike Kelly46272802019-08-14 17:00:48 +01003768 if (!isSupported)
3769 {
3770 return false;
3771 }
3772
3773 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3774 assert(layer != nullptr);
3775 input.Connect(layer->GetInputSlot(0));
3776
3777 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3778}
3779
3780template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003781 typename HalOperation = typename HalPolicy::Operation,
3782 typename HalModel = typename HalPolicy::Model>
3783bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003784{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003785 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003786
3787 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3788 if (!input.IsValid())
3789 {
3790 return Fail("%s: Operation has invalid inputs", __func__);
3791 }
3792
3793 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3794 unsigned int rank = inputInfo.GetNumDimensions();
3795 if (rank > 4)
3796 {
3797 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3798 }
3799
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003800 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003801 if (!output)
3802 {
3803 return Fail("%s: Could not read output 0", __func__);
3804 }
3805
3806 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly46272802019-08-14 17:00:48 +01003807
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003808 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3809 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3810 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003811
3812 std::vector<int32_t> beginValues;
3813 std::vector<int32_t> endValues;
3814 std::vector<int32_t> stridesValues;
3815
3816 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003817 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003818 {
3819 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3820 {
3821 return false;
3822 }
3823
3824 if (operandValues.size() != rank)
3825 {
3826 return false;
3827 }
3828
3829 return true;
3830 };
3831
3832 if (!ValidateInputOperands(*beginOperand, beginValues)
3833 || !ValidateInputOperands(*endOperand, endValues)
3834 || !ValidateInputOperands(*stridesOperand, stridesValues))
3835 {
3836 return Fail("%s: Operation has invalid input operand", __func__);
3837 }
3838
3839 // Stride cannot have value '0'
3840 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3841 {
3842 return Fail("%s: Stride must be non-zero value.", __func__);
3843 }
3844
3845 armnn::StridedSliceDescriptor descriptor;
3846 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3847 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3848 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3849 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3850
3851 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3852 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3853 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3854 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3855 {
3856 return Fail("%s: Operation has invalid inputs", __func__);
3857 }
3858
3859 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003860 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3861 {
3862 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3863 IsStridedSliceSupported,
3864 data.m_Backends,
3865 isSupported,
3866 inputInfo,
3867 outputInfo,
3868 descriptor);
3869 };
3870
3871 if(IsDynamicTensor(outputInfo))
3872 {
3873 isSupported = AreDynamicTensorsSupported();
3874 }
3875 else
3876 {
3877 validateFunc(outputInfo, isSupported);
3878 }
3879
Mike Kelly46272802019-08-14 17:00:48 +01003880 if (!isSupported)
3881 {
3882 return false;
3883 }
3884
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003885 // Check if slice can fit in a inferred output
3886 armnn::TensorShape inputShape = inputInfo.GetShape();
3887 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
3888 {
3889 int stride = descriptor.m_Stride[i];
3890 int start = descriptor.GetStartForAxis(inputShape, i);
3891 int stop = descriptor.GetStopForAxis(inputShape, i, start);
3892
3893 if (descriptor.m_ShrinkAxisMask & (1 << i))
3894 {
3895 // If the difference between the start point and the end point of the slice on an axis being shrunk
3896 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
3897 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
3898 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
3899 {
3900 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
3901 }
Ryan OShea00b586b2020-07-03 11:31:20 +01003902
3903 if(stride < 0)
3904 {
3905 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
3906 }
Sadik Armaganbe6b3c22020-05-14 11:51:33 +01003907 }
3908 }
3909
Mike Kelly46272802019-08-14 17:00:48 +01003910 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3911 assert(layer != nullptr);
3912 input.Connect(layer->GetInputSlot(0));
3913
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003914 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003915}
3916
3917template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003918 typename HalOperation = typename HalPolicy::Operation,
3919 typename HalModel = typename HalPolicy::Model>
3920bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003921{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003922 using HalOperand = typename HalPolicy::Operand;
Kevin May81f27fd2020-08-20 10:22:53 +01003923 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
Mike Kelly46272802019-08-14 17:00:48 +01003924
3925 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3926 if (!input.IsValid())
3927 {
3928 return Fail("%s: Operation has invalid inputs", __func__);
3929 }
3930
3931 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3932 unsigned int rank = inputInfo.GetNumDimensions();
3933 if (rank > 4)
3934 {
3935 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3936 }
3937
3938 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3939 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003940 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003941
3942 std::vector<int32_t> perm(rank);
Kevin May81f27fd2020-08-20 10:22:53 +01003943 if (!permOperand || (permOperand->lifetime == HalOperandLifeTime::NO_VALUE))
Mike Kelly46272802019-08-14 17:00:48 +01003944 {
Mike Kelly46272802019-08-14 17:00:48 +01003945 for (unsigned int i = rank; i > 0; i--)
3946 {
Matthew Sloyan9b088d92020-09-14 15:12:55 +01003947 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
Mike Kelly46272802019-08-14 17:00:48 +01003948 }
3949 }
Mike Kellyeec836e2020-02-18 10:03:30 +00003950 else if (!GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data))
Mike Kelly46272802019-08-14 17:00:48 +01003951 {
Mike Kellyeec836e2020-02-18 10:03:30 +00003952 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
Mike Kelly46272802019-08-14 17:00:48 +01003953 }
3954
3955 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3956
Mike Kelly4a956582020-02-28 10:32:09 +00003957 armnn::TransposeDescriptor transposeDesc;
3958 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003959
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003960 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003961 if (!output)
3962 {
3963 return Fail("%s: Could not read output 0", __func__);
3964 }
3965
3966 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3967
3968 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003969 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3970 {
3971 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3972 IsTransposeSupported,
3973 data.m_Backends,
3974 isSupported,
3975 inputInfo,
3976 outputInfo,
3977 transposeDesc);
3978 };
3979
3980 if(IsDynamicTensor(outputInfo))
3981 {
3982 isSupported = AreDynamicTensorsSupported();
3983 }
3984 else
3985 {
3986 validateFunc(outputInfo, isSupported);
3987 }
3988
Mike Kelly46272802019-08-14 17:00:48 +01003989 if (!isSupported)
3990 {
3991 return false;
3992 }
3993
Mike Kelly4a956582020-02-28 10:32:09 +00003994 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Mike Kelly46272802019-08-14 17:00:48 +01003995 assert(layer != nullptr);
3996 input.Connect(layer->GetInputSlot(0));
3997
Teresa Charlin4bd9a742020-08-12 12:58:50 +01003998 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Mike Kelly46272802019-08-14 17:00:48 +01003999}
4000
4001template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01004002 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01004003 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01004004 typename HalModel = typename HalPolicy::Model>
4005bool ConvertBatchToSpaceNd(const HalOperation& operation,
4006 const HalModel& model,
4007 ConversionData& data)
4008{
Finn Williams23b87b32019-07-30 11:44:05 +01004009
4010 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4011 if (!input.IsValid())
4012 {
4013 return Fail("%s: Operation has invalid inputs", __func__);
4014 }
4015
4016 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4017 if (!output)
4018 {
4019 return Fail("%s: Could not read output 0", __func__);
4020 }
4021
4022 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williams23b87b32019-07-30 11:44:05 +01004023
4024 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4025 if (!blockOperand)
4026 {
4027 return Fail("%s: Could not read input 1", __func__);
4028 }
4029
4030 // Convert the block operand to int32
4031 std::vector<int32_t> block;
4032 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
4033 {
4034 return Fail("%s: Input 1 has invalid values", __func__);
4035 }
4036
4037 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4038
4039 unsigned int rank = inputInfo.GetNumDimensions();
4040 if (rank != 4)
4041 {
4042 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
4043 }
4044
4045 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
4046 {
4047 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
4048 " greater than or equal to 1", __func__);
4049 }
4050
4051 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
4052 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
4053 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
4054
Kevin May42477c12020-03-26 13:34:14 +00004055 if (Is12OrLaterOperand(*output))
Finn Williams23b87b32019-07-30 11:44:05 +01004056 {
Finn Williams0e4e4392019-07-31 10:56:27 +01004057 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01004058 }
4059 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
4060 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
4061
4062 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004063 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4064 {
4065 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4066 IsBatchToSpaceNdSupported,
4067 data.m_Backends,
4068 isSupported,
4069 inputInfo,
4070 outputInfo,
4071 batchToSpaceNdDesc);
4072 };
4073
4074 if(!IsDynamicTensor(outputInfo))
4075 {
4076 validateFunc(outputInfo, isSupported);
4077 }
4078 else
4079 {
4080 isSupported = AreDynamicTensorsSupported();
4081 }
4082
4083
Finn Williams23b87b32019-07-30 11:44:05 +01004084 if (!isSupported)
4085 {
4086 return false;
4087 }
4088
4089 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
4090 assert(layer != nullptr);
4091 input.Connect(layer->GetInputSlot(0));
4092
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004093 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williams23b87b32019-07-30 11:44:05 +01004094}
Mike Kelly0a879362019-07-29 16:56:31 +01004095
Finn Williamsd74c5052019-07-30 17:06:00 +01004096template<typename HalPolicy,
4097 typename HalOperation = typename HalPolicy::Operation,
4098 typename HalOperand = typename HalPolicy::Operand,
4099 typename HalModel = typename HalPolicy::Model>
4100bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
4101{
4102 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
4103 if (!input.IsValid())
4104 {
4105 return Fail("%s: Operation has invalid inputs", __func__);
4106 }
4107
4108 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
4109 unsigned int rank = inputInfo.GetNumDimensions();
4110 unsigned int spatialDim = rank - 2;
4111
4112 if (rank != 4)
4113 {
4114 Fail("%s: Only inputs with rank 4 are supported", __func__);
4115 }
4116
4117 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
4118 if (!output)
4119 {
4120 return Fail("%s: Could not read output 0", __func__);
4121 }
4122
4123 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Finn Williamsd74c5052019-07-30 17:06:00 +01004124
4125 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
4126 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
4127
4128 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4129 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4130 {
4131 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4132 }
4133
4134 std::vector<int32_t> blockShape;
Mike Kellyeec836e2020-02-18 10:03:30 +00004135 if (!GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data))
4136 {
4137 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4138 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004139 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
4140 {
4141 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4142 }
4143
4144 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4145 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4146 {
4147 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4148 }
4149
4150 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4151 std::vector<int32_t> paddings;
Mike Kellyeec836e2020-02-18 10:03:30 +00004152 if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
4153 {
4154 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4155 }
Finn Williamsd74c5052019-07-30 17:06:00 +01004156 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4157 {
4158 int paddingBeforeInput = paddings[i];
4159 int paddingAfterInput = paddings[i + 1];
4160 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
4161 {
4162 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4163 }
4164
4165 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4166 }
4167
4168 armnn::SpaceToBatchNdDescriptor descriptor;
4169 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4170 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4171 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4172
Kevin May42477c12020-03-26 13:34:14 +00004173 if (Is12OrLaterOperand(*output))
Finn Williamsd74c5052019-07-30 17:06:00 +01004174 {
4175 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
4176 }
4177
4178 bool isSupported = false;
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004179 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4180 {
4181 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4182 IsSpaceToBatchNdSupported,
4183 data.m_Backends,
4184 isSupported,
4185 inputInfo,
4186 outputInfo,
4187 descriptor);
4188 };
4189
4190 if(IsDynamicTensor(outputInfo))
4191 {
4192 isSupported = AreDynamicTensorsSupported();
4193 }
4194 else
4195 {
4196 validateFunc(outputInfo, isSupported);
4197 }
4198
Finn Williamsd74c5052019-07-30 17:06:00 +01004199 if (!isSupported)
4200 {
4201 return false;
4202 }
4203
4204 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4205 assert(layer != nullptr);
4206 input.Connect(layer->GetInputSlot(0));
4207
Teresa Charlin4bd9a742020-08-12 12:58:50 +01004208 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data, nullptr, validateFunc);
Finn Williamsd74c5052019-07-30 17:06:00 +01004209}
4210
saoste01b8471482018-10-10 09:44:51 +01004211} // namespace armnn_driver
Kevin DuBoisa2cb5482020-08-26 13:41:12 -07004212#ifdef __clang__
4213#pragma clang diagnostic pop
4214#endif