blob: 81e5cf1e13aec4856b11f00f4560c061029a552c [file] [log] [blame]
Jean-Luc Brouillet96775122017-07-12 01:37:27 -07001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Utils"
18
Slava Shklyaev3b1ea252018-11-06 15:32:44 +000019#include "Utils.h"
20
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000021#include <android-base/logging.h>
22#include <android-base/properties.h>
23#include <android-base/strings.h>
Xusong Wangca8c1cb2020-05-06 13:49:19 -070024#include <errno.h>
25#include <poll.h>
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000026#include <sys/system_properties.h>
Colin Crossbd7f9c42019-10-10 22:58:13 +000027
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000028#include <algorithm>
Colin Crossbd7f9c42019-10-10 22:58:13 +000029#include <limits>
30#include <set>
31#include <string>
32#include <tuple>
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000033#include <unordered_map>
Colin Crossbd7f9c42019-10-10 22:58:13 +000034#include <utility>
35#include <vector>
36
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +000037#include "ControlFlow.h"
Colin Crossbd7f9c42019-10-10 22:58:13 +000038#include "NeuralNetworks.h"
39#include "NeuralNetworksOEM.h"
40#include "OperationResolver.h"
41#include "ValidateHal.h"
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000042
namespace android {
namespace nn {

using namespace hal;

// Worst-case (FLT_MAX) performance values — presumably used as the fallback
// when a driver reports no performance info; verify at call sites.
constexpr PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};

// System property read by initVLogMask() to select verbose-logging tags.
const char kVLogPropKey[] = "debug.nn.vlog";
// Bitmask of enabled VLOG tags; all bits are set until initVLogMask() runs.
int vLogMask = ~0;
52
53// Split the space separated list of tags from verbose log setting and build the
54// logging mask from it. note that '1' and 'all' are special cases to enable all
55// verbose logging.
56//
57// NN API verbose logging setting comes from system property debug.nn.vlog.
58// Example:
59// setprop debug.nn.vlog 1 : enable all logging tags.
60// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
61// COMPILATION tags.
62void initVLogMask() {
63 vLogMask = 0;
64 const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
65 if (vLogSetting.empty()) {
66 return;
67 }
68
Michael Butler43953b82019-07-22 18:59:46 -070069 std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
70 {"all", -1},
71 {"model", MODEL},
72 {"compilation", COMPILATION},
73 {"execution", EXECUTION},
74 {"cpuexe", CPUEXE},
75 {"manager", MANAGER},
Xusong Wang5d0e5552019-11-27 12:52:28 -080076 {"driver", DRIVER},
77 {"memory", MEMORY}};
Miao Wang820215d2017-10-04 19:45:45 -070078
David Grossd55ec4a2018-03-27 15:48:03 -070079 std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
Miao Wang820215d2017-10-04 19:45:45 -070080 for (const auto& elem : elements) {
81 const auto& flag = vLogFlags.find(elem);
82 if (flag == vLogFlags.end()) {
83 LOG(ERROR) << "Unknown trace flag: " << elem;
84 continue;
85 }
86
87 if (flag->second == -1) {
88 // -1 is used for the special values "1" and "all" that enable all
89 // tracing.
90 vLogMask = ~0;
91 return;
92 } else {
93 vLogMask |= 1 << flag->second;
94 }
95 }
96}
97
Michael Butlerf2091af2020-02-25 11:39:05 -080098Deadline makeDeadline(uint64_t duration) {
99 const auto maxTime = Deadline::max();
100 const auto currentTime = std::chrono::steady_clock::now();
101
102 // Create Deadline. If there would be an overflow, use the max value.
103 const uint64_t remainingNanoseconds =
104 std::chrono::duration_cast<std::chrono::nanoseconds>(maxTime - currentTime).count();
105 if (duration > remainingNanoseconds) {
106 return maxTime;
107 }
108 return currentTime + std::chrono::nanoseconds{duration};
Michael Butler7002a0a2020-02-17 20:38:13 -0800109}
Michael Butler83e406e2019-12-16 18:32:45 -0800110
Michael Butlerf2091af2020-02-25 11:39:05 -0800111std::optional<Deadline> makeDeadline(std::optional<uint64_t> duration) {
112 return duration.has_value() ? makeDeadline(*duration) : std::optional<Deadline>{};
Michael Butler7002a0a2020-02-17 20:38:13 -0800113}
114
// Largest epoch offset, in nanoseconds, representable by a steady_clock
// time point with nanosecond resolution.
static uint64_t getMaxNanosecondsSinceEpoch() {
    using MaxTimePoint =
            std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds>;
    return MaxTimePoint::max().time_since_epoch().count();
}
Michael Butler39c6e3e2020-02-12 18:57:53 -0800120
Michael Butlerf2091af2020-02-25 11:39:05 -0800121std::optional<Deadline> makeDeadline(const OptionalTimePoint& timePoint) {
122 using Discriminator = hal::OptionalTimePoint::hidl_discriminator;
123 if (timePoint.getDiscriminator() == Discriminator::none) {
124 return std::nullopt;
125 }
126 const uint64_t nanosecondsSinceEpoch = timePoint.nanosecondsSinceEpoch();
127 const uint64_t maxNanosecondsSinceEpoch = getMaxNanosecondsSinceEpoch();
128
129 // Clamp time point to max.
130 if (nanosecondsSinceEpoch >= maxNanosecondsSinceEpoch) {
131 return Deadline::max();
Michael Butler83e406e2019-12-16 18:32:45 -0800132 }
Michael Butler83e406e2019-12-16 18:32:45 -0800133
Michael Butlerf2091af2020-02-25 11:39:05 -0800134 // Return provided time point.
135 return Deadline{std::chrono::nanoseconds{nanosecondsSinceEpoch}};
Michael Butler83e406e2019-12-16 18:32:45 -0800136}
137
Michael Butlerf2091af2020-02-25 11:39:05 -0800138bool hasDeadlinePassed(const std::optional<Deadline>& deadline) {
139 if (!deadline.has_value()) {
140 return false;
141 }
142 return std::chrono::steady_clock::now() >= *deadline;
143}
144
145static OptionalTimePoint makeTimePoint(const Deadline& deadline) {
146 const auto timeSinceEpoch = deadline.time_since_epoch();
147 const uint64_t nanosecondsSinceEpoch =
148 std::chrono::duration_cast<std::chrono::nanoseconds>(timeSinceEpoch).count();
149 OptionalTimePoint ret;
150 ret.nanosecondsSinceEpoch(nanosecondsSinceEpoch);
151 return ret;
152}
153
154OptionalTimePoint makeTimePoint(const std::optional<Deadline>& deadline) {
155 return deadline.has_value() ? makeTimePoint(*deadline) : OptionalTimePoint{};
Michael Butler83e406e2019-12-16 18:32:45 -0800156}
157
Slava Shklyaevbecd62c2019-01-23 16:09:51 +0000158static bool isExtensionOperandType(int32_t type) {
159 return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
160}
161
162static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
163 return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
164}
165
166bool isExtensionOperandType(OperandType type) {
167 return isExtensionOperandType(static_cast<int32_t>(type));
168}
169
170bool isExtensionOperationType(OperationType type) {
171 return isExtensionOperationType(static_cast<int32_t>(type));
172}
173
namespace {

// Looks up `code` in one of two parallel tables: `table` for standard codes
// and `tableOEM` for OEM codes (which start at kOEMCodeBase). Asserts on an
// out-of-range code and returns a default-constructed entry.
template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
EntryType tableLookup(const EntryType (&table)[entryCount],
                      const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
    if (code < entryCount) {
        return table[code];
    } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
        return tableOEM[code - kOEMCodeBase];
    } else {
        nnAssert(!"tableLookup: bad code");
        return EntryType();
    }
}

// Adapts an operation's inputs/outputs — given as index arrays into an
// operand table — to the IOperationValidationContext interface consumed by
// per-operation validators. Does not own any of the pointed-to data.
class OperationValidationContext : public IOperationValidationContext {
    DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);

   public:
    OperationValidationContext(const char* operationName, uint32_t inputCount,
                               const uint32_t* inputIndexes, uint32_t outputCount,
                               const uint32_t* outputIndexes, const Operand* operands,
                               HalVersion halVersion)
        : operationName(operationName),
          inputCount(inputCount),
          inputIndexes(inputIndexes),
          outputCount(outputCount),
          outputIndexes(outputIndexes),
          operands(operands),
          halVersion(halVersion) {}

    const char* getOperationName() const override;
    HalVersion getHalVersion() const override;

    uint32_t getNumInputs() const override;
    OperandType getInputType(uint32_t index) const override;
    Shape getInputShape(uint32_t index) const override;
    const OperandExtraParams getInputExtraParams(uint32_t index) const override;

    uint32_t getNumOutputs() const override;
    OperandType getOutputType(uint32_t index) const override;
    Shape getOutputShape(uint32_t index) const override;

   private:
    // Resolve an input/output position to the backing operand; the index must
    // be in range (CHECKed).
    const Operand* getInputOperand(uint32_t index) const;
    const Operand* getOutputOperand(uint32_t index) const;

    const char* operationName;
    uint32_t inputCount;
    const uint32_t* inputIndexes;
    uint32_t outputCount;
    const uint32_t* outputIndexes;
    const Operand* operands;
    HalVersion halVersion;
};

const char* OperationValidationContext::getOperationName() const {
    return operationName;
}

HalVersion OperationValidationContext::getHalVersion() const {
    return halVersion;
}

const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(inputCount));
    return &operands[inputIndexes[index]];
}

const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
    CHECK(index < static_cast<uint32_t>(outputCount));
    return &operands[outputIndexes[index]];
}

uint32_t OperationValidationContext::getNumInputs() const {
    return inputCount;
}

uint32_t OperationValidationContext::getNumOutputs() const {
    return outputCount;
}

OperandType OperationValidationContext::getInputType(uint32_t index) const {
    return getInputOperand(index)->type;
}

// Builds a Shape aggregate from the input operand's fields.
Shape OperationValidationContext::getInputShape(uint32_t index) const {
    const Operand* operand = getInputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
    return getInputOperand(index)->extraParams;
}

OperandType OperationValidationContext::getOutputType(uint32_t index) const {
    return getOutputOperand(index)->type;
}

// Builds a Shape aggregate from the output operand's fields.
Shape OperationValidationContext::getOutputShape(uint32_t index) const {
    const Operand* operand = getOutputOperand(index);
    return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
            operand->extraParams};
}

};  // anonymous namespace
281
Jean-Luc Brouilletc5e342b2017-10-11 22:28:55 -0700282#define COUNT(X) (sizeof(X) / sizeof(X[0]))
283
Slava Shklyaevd1e50b12019-01-16 16:34:51 +0000284std::string getOperandTypeName(OperandType type) {
285 return toString(type);
Jean-Luc Brouilletc5e342b2017-10-11 22:28:55 -0700286}
287
Slava Shklyaev6e912802019-01-07 14:16:49 +0000288static std::string getOperationName(uint32_t code) {
289 return getOperationName(static_cast<OperationType>(code));
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000290}
291
Slava Shklyaev6e912802019-01-07 14:16:49 +0000292std::string getOperationName(OperationType type) {
293 return toString(type);
Jean-Luc Brouillet96775122017-07-12 01:37:27 -0700294}
295
// Size in bytes of one element of each non-extension operand type, indexed by
// the ANEURALNETWORKS_* type code. A size of 0 marks types whose operands
// carry no data (MODEL).
const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        0,  // ANEURALNETWORKS_MODEL
};

// Table must have exactly one entry per non-OEM data type.
static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");
316
// Whether each non-extension operand type is a scalar (true) or a tensor
// (false), indexed by the ANEURALNETWORKS_* type code.
const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        true,   // ANEURALNETWORKS_MODEL
};

// Table must have exactly one entry per non-OEM data type.
static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");
337
// Element sizes for the OEM type range (codes starting at kOEMCodeBase).
const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

// Table must have exactly one entry per OEM data type.
static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");
345
// Scalar-ness for the OEM type range (codes starting at kOEMCodeBase).
const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

// Table must have exactly one entry per OEM data type.
static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");
353
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000354bool nonExtensionOperandTypeIsScalar(int type) {
355 CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
356 return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
357}
358
359uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +0000360 CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
Jean-Luc Brouillet707dbd22017-07-25 00:17:50 -0700361 int n = static_cast<int>(type);
Slava Shklyaev699000b2020-03-23 16:50:36 +0000362 uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
363 return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
364 ? sizeOfElement
365 : sizeOfTensorData(sizeOfElement, dimensions);
366}
Jean-Luc Brouillet96775122017-07-12 01:37:27 -0700367
// Returns {false, size} on success, {true, 0} if the total byte size
// overflows uint32_t. An empty dimension list (unknown rank) is reported as
// zero bytes without overflow.
static std::pair<bool, uint32_t> sizeOfTensorDataHelper(uint32_t sizeOfElement,
                                                        const std::vector<uint32_t>& dimensions) {
    if (dimensions.empty()) {
        return {false, 0};
    }
    constexpr uint64_t kMaxSize = static_cast<uint64_t>(std::numeric_limits<uint32_t>::max());
    // Accumulate in 64 bits; the product of two values each <= 2^32 - 1 and
    // 2^32 - 1 cannot overflow uint64_t before the check below fires.
    uint64_t totalBytes = static_cast<uint64_t>(sizeOfElement);
    for (const uint32_t dim : dimensions) {
        totalBytes *= dim;
        if (totalBytes > kMaxSize) {
            return {true, 0};
        }
    }
    return {false, static_cast<uint32_t>(totalBytes)};
}
382
383uint32_t sizeOfTensorData(uint32_t sizeOfElement, const std::vector<uint32_t>& dimensions) {
384 const auto [overflow, size] = sizeOfTensorDataHelper(sizeOfElement, dimensions);
385 CHECK(!overflow);
386 return size;
387}
388
389bool nonExtensionOperandSizeOfDataOverflowsUInt32(hal::OperandType type,
390 const std::vector<uint32_t>& dimensions) {
391 CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
392 int n = static_cast<int>(type);
393 uint32_t sizeOfElement = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);
394 return tableLookup(kScalarDataType, kScalarDataTypeOEM, n)
395 ? false
396 : sizeOfTensorDataOverflowsUInt32(sizeOfElement, dimensions);
397}
398
399bool sizeOfTensorDataOverflowsUInt32(uint32_t sizeOfElement,
400 const std::vector<uint32_t>& dimensions) {
401 return sizeOfTensorDataHelper(sizeOfElement, dimensions).first;
Jean-Luc Brouillet96775122017-07-12 01:37:27 -0700402}
403
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000404bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
405 if (!isExtensionOperandType(type)) {
406 CHECK(!nonExtensionOperandTypeIsScalar(type))
407 << "A scalar type can never have unspecified dimensions";
Slava Shklyaeva1516352019-02-06 16:35:49 +0000408 }
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000409 return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
David Gross1ec15052018-06-01 11:01:12 -0700410}
411
Xusong Wang5a40cdb2020-02-03 16:24:35 -0800412bool tensorHasUnspecifiedDimensions(OperandType type, const std::vector<uint32_t>& dimensions) {
413 return tensorHasUnspecifiedDimensions(static_cast<int>(type), dimensions.data(),
414 dimensions.size());
415}
416
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000417bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
418 return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
David Gross1ec15052018-06-01 11:01:12 -0700419}
420
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000421bool tensorHasUnspecifiedDimensions(const Operand& operand) {
422 return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
423 operand.dimensions.size());
David Gross1ec15052018-06-01 11:01:12 -0700424}
425
// Returns the number of padding bytes needed at offset `index` so that a
// value of `length` bytes is suitably aligned: 1-byte values need none,
// 2-3 byte values align to 2, and anything larger aligns to 4.
uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t alignmentMask;
    if (length < 2) {
        alignmentMask = 0;  // No alignment necessary
    } else if (length < 4) {
        alignmentMask = 1;  // Align on 2-byte boundary
    } else {
        alignmentMask = 3;  // Align on 4-byte boundary
    }
    // (~(index - 1)) equals -index in two's complement, so this computes the
    // distance from `index` up to the next multiple of (alignmentMask + 1).
    return (~(index - 1)) & alignmentMask;
}
438
Michael Butler75886e72018-01-23 11:05:43 -0800439void logModelToInfo(const V1_0::Model& model) {
440 LOG(INFO) << "V1_0::Model start";
441 LOG(INFO) << "operands" << toString(model.operands);
442 LOG(INFO) << "operations" << toString(model.operations);
443 LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
444 LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
445 LOG(INFO) << "operandValues size" << model.operandValues.size();
Miao Wang64031fa2018-04-10 15:20:53 -0700446 LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
Michael Butler75886e72018-01-23 11:05:43 -0800447}
448
// Dumps a V1_1 model to the INFO log for debugging.
void logModelToInfo(const V1_1::Model& model) {
    LOG(INFO) << "V1_1::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size " << model.operandValues.size();
    // Pool contents are large; only rendered in debug builds.
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}
458
Colin Crossbd7f9c42019-10-10 22:58:13 +0000459void logModelToInfo(const V1_2::Model& model) {
460 LOG(INFO) << "V1_2::Model start";
461 LOG(INFO) << "operands" << toString(model.operands);
462 LOG(INFO) << "operations" << toString(model.operations);
463 LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
464 LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
465 LOG(INFO) << "operandValues size" << model.operandValues.size();
466 LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
467 LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
468 LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
469}
470
Slava Shklyaev8de7a222019-12-13 18:05:41 +0000471static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
472 LOG(INFO) << label << ".operands" << toString(subgraph.operands);
473 LOG(INFO) << label << ".operations" << toString(subgraph.operations);
474 LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
475 LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
476}
477
// Dumps a V1_3 model — the main subgraph plus all referenced subgraphs — to
// the INFO log for debugging.
void logModelToInfo(const V1_3::Model& model) {
    LOG(INFO) << "V1_3::Model start";
    logSubgraphToInfo("main", model.main);
    for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
        logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
    }
    LOG(INFO) << "operandValues size " << model.operandValues.size();
    // Pool contents are large; only rendered in debug builds.
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
    LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
}
489
// Validates the per-channel quantization parameters supplied for `halOperand`,
// which must be of type TENSOR_QUANT8_SYMM_PER_CHANNEL (returns false
// otherwise, without logging). Checks that the channel dimension is in range
// and fully specified, and that there is exactly one positive scale per
// channel. `tag` is prepended to logged error messages.
bool validateOperandSymmPerChannelQuantParams(
        const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
        const char* tag) {
    if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return false;
    }

    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
    // One scale per channel; the channel dimension itself must be known.
    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
    }
    return true;
}
507
// Scalar operands must declare no dimensions at all.
static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}
513
// TENSOR_QUANT8_ASYMM: zero point in [0, 255], positive scale.
static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
520
// TENSOR_QUANT8_ASYMM_SIGNED: zero point in [-128, 127], positive scale.
static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
                                            const char* tag) {
    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
528
// TENSOR_QUANT8_SYMM: zero point must be 0, positive scale.
static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
534
// TENSOR_QUANT16_ASYMM: zero point in [0, 65535], positive scale.
static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
541
// Symmetric quantized tensors (e.g. TENSOR_QUANT16_SYMM): zero point must be
// 0, positive scale.
static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
547
// Non-quantized types must carry no quantization parameters at all.
static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}
553
// Validates a tensor operand's dimensions and checks that its total byte size
// fits in uint32_t. When `allowPartial` is true, unknown rank
// (dimensionCount == 0) and unknown dimensions (value 0) are permitted.
// `extensionOperandTypeInfo` supplies the element byte size for extension
// types and is only dereferenced when type.type is an extension type.
static bool validateTensorDimensions(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    if (!allowPartial) {
        NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    }
    // Start from the element size; multiply in each known dimension below.
    uint64_t size =
            isExtensionOperandType(type.type)
                    ? extensionOperandTypeInfo->byteSize
                    : tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, static_cast<int>(type.type));
    constexpr uint64_t kMaxSize = std::numeric_limits<uint32_t>::max();
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        if (!allowPartial) {
            NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
        }
        if (type.dimensions[i] != 0) {
            size *= type.dimensions[i];
            // Checked after every multiplication so the 64-bit accumulator
            // cannot itself overflow before the failure is reported.
            NN_RET_CHECK_LE(size, kMaxSize) << tag << " operand byte size exceeds " << kMaxSize;
        }
    }
    return true;
}
577
// Validates an operand type supplied through the NDK API.
// `extensionOperandTypeInfo` must be non-null iff type.type is an extension
// type; it supplies element size and tensor-ness for extension types.
// Returns true on success; failures are logged via NN_RET_CHECK.
static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    // `dimensions` must be null exactly when `dimensionCount` is zero.
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(
                    validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        // Extension types never carry quantization parameters.
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
                                                        // to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        // extensionOperandTypeInfo is null here; validateTensorDimensions only
        // dereferences it for extension types, which cannot reach this branch.
        NN_RET_CHECK(validateTensorDimensions(type, extensionOperandTypeInfo, tag, allowPartial));
        // Dispatch to the quantization-parameter validator for the type.
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
            NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}
628
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000629int validateOperandType(const ANeuralNetworksOperandType& type,
630 const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
631 const char* tag, bool allowPartial) {
632 return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
633 ? ANEURALNETWORKS_NO_ERROR
634 : ANEURALNETWORKS_BAD_DATA;
Jean-Luc Brouillete127e492017-09-27 23:59:20 -0700635}
636
637int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
638 const char* tag) {
639 for (uint32_t i = 0; i < count; i++) {
640 if (list[i] >= operandCount) {
641 LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
642 << ", operandCount " << operandCount;
643 return ANEURALNETWORKS_BAD_DATA;
644 }
645 }
646 return ANEURALNETWORKS_NO_ERROR;
647}
648
Michael Butler43953b82019-07-22 18:59:46 -0700649int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
650 const uint32_t* inOperandIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800651 const std::vector<OperandType>& inExpectedTypes,
652 uint32_t outOperandCount, const uint32_t* outOperandIndexes,
653 const std::vector<OperandType>& outExpectedInTypes) {
Slava Shklyaevc2b17632018-12-04 14:17:02 +0000654 if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
655 outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
Slava Shklyaev9616a672018-10-29 18:25:11 +0000656 LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
657 << outExpectedInTypes.size() << " outputs,"
658 << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs";
Miao Wang137d2782018-03-06 15:03:14 -0800659 return ANEURALNETWORKS_BAD_DATA;
660 }
661 for (uint32_t i = 0; i < inOperandCount; i++) {
662 if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
663 LOG(ERROR) << "Invalid input tensor type "
Michael Butler43953b82019-07-22 18:59:46 -0700664 << toString(operands[inOperandIndexes[i]].type) << " for input " << i
665 << ", expected " << toString(inExpectedTypes[i]);
Miao Wang137d2782018-03-06 15:03:14 -0800666 return ANEURALNETWORKS_BAD_DATA;
667 }
668 }
669 for (uint32_t i = 0; i < outOperandCount; i++) {
670 if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
671 LOG(ERROR) << "Invalid output tensor type "
Michael Butler43953b82019-07-22 18:59:46 -0700672 << toString(operands[outOperandIndexes[i]].type) << " for input " << i
673 << ", expected " << toString(outExpectedInTypes[i]);
Miao Wang137d2782018-03-06 15:03:14 -0800674 return ANEURALNETWORKS_BAD_DATA;
675 }
676 }
677
678 return ANEURALNETWORKS_NO_ERROR;
679}
680
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000681static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
682 HalVersion minSupportedHalVersion) {
683 if (halVersion < minSupportedHalVersion) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000684 LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000685 << " are only supported in " << toString(minSupportedHalVersion)
686 << " and later (validating using " << toString(halVersion) << ")";
687 return ANEURALNETWORKS_BAD_DATA;
688 }
689 return ANEURALNETWORKS_NO_ERROR;
690}
691
// Checks if two operands have the same types, shapes, and parameters.
// Omits lifetime, numberOfConsumers, and location.
// Used below to verify that an operand crossing a subgraph boundary (an inner
// IF/WHILE subgraph operand vs. the corresponding outer operand) matches.
// Returns true on match; on the first mismatching field, logs both values
// (via NN_RET_CHECK) and returns false.
static bool compatible(const Operand& a, const Operand& b) {
    NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type);
    NN_RET_CHECK(a.dimensions == b.dimensions)
            << toString(a.dimensions) << " != " << toString(b.dimensions);
    // Quantization parameters must agree exactly.
    NN_RET_CHECK_EQ(a.scale, b.scale);
    NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
    NN_RET_CHECK(a.extraParams == b.extraParams)
            << toString(a.extraParams) << " != " << toString(b.extraParams);
    return true;
}
704
// Validates a control-flow condition operand: it must be a TENSOR_BOOL8 of
// rank 1 with a single element (shape [1]). Used for the IF condition input
// and for the output of a WHILE condition subgraph.
static bool validateConditionOperand(const Operand& operand) {
    NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
            << "Unexpected condition operand type: " << toString(operand.type);
    // Rank must be exactly 1, and the sole dimension must be 1.
    NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
    NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
    return true;
}
712
// Asserts (fatally, via CHECK) that every callback of the helper has been
// supplied. The IF/WHILE validation routines below invoke all five callbacks
// unconditionally, so a null entry would be a programming error by the caller.
static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) {
    CHECK(helper.isValidSubgraphReference != nullptr);
    CHECK(helper.getSubgraphInputCount != nullptr);
    CHECK(helper.getSubgraphOutputCount != nullptr);
    CHECK(helper.getSubgraphInputOperand != nullptr);
    CHECK(helper.getSubgraphOutputOperand != nullptr);
}
720
// Validates an ANEURALNETWORKS_IF operation. Its inputs are:
//   inputs[op::kCondBoolOperand]  - the condition (TENSOR_BOOL8 singleton),
//   inputs[op::kThenModelOperand] - the "then" subgraph reference,
//   inputs[op::kElseModelOperand] - the "else" subgraph reference,
//   inputs[op::kFirstInput..]     - operands forwarded to the chosen branch.
// Both branch subgraphs must consume exactly the forwarded operands and
// produce operands compatible() with the IF operation's own outputs.
static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
                                const uint32_t* outputs, const std::vector<Operand>& operands,
                                const SubgraphValidationHelper& helper) {
    namespace op = operation_if;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output";
    // Checks one branch subgraph against the IF operation's inputs/outputs.
    auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand);
        const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand);
        // The branch must take exactly the forwarded operands and produce
        // exactly the IF operation's outputs.
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount);
        NN_RET_CHECK_EQ(outputCount, branchModelOutputCount);
        // Each branch input must be compatible with the corresponding outer input.
        for (uint32_t i = 0; i < branchModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        // Each branch output must be compatible with the corresponding outer output.
        for (uint32_t i = 0; i < branchModelOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]]))
            << "Validation failed for IF condition operand";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]]))
            << "Validation failed for IF then model";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]]))
            << "Validation failed for IF else model";
    return true;
}
755
// Validates an ANEURALNETWORKS_WHILE operation: checks the condition and body
// subgraph references against the operation's inputs/outputs and against each
// other (every carried value must be fed back with a compatible type/shape).
static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
                                   uint32_t outputCount, const uint32_t* outputs,
                                   const std::vector<Operand>& operands,
                                   const SubgraphValidationHelper& helper) {
    // Let the loop have
    // - m >= 1 input-output operands,
    // - k >= 0 state-only operands, and
    // - n >= 0 input-only operands.
    // Then
    // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs.
    // - the condition model has (m + k + n) inputs and 1 output.
    // - the body model has (m + k + n) inputs and (m + k) outputs.
    namespace op = operation_while;
    checkSubgraphValidationHelper(helper);
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output";
    // The condition model must consume all (m + k + n) inner operands and
    // produce a single boolean singleton (see validateConditionOperand).
    auto validateCondOperand = [&](const Operand& condModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand);
        const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount);
        NN_RET_CHECK_EQ(condModelOutputCount, 1u);
        // Each condition-model input must match the corresponding outer operand.
        for (uint32_t i = 0; i < condModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        NN_RET_CHECK(
                validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0)));
        return true;
    };
    // The body model consumes the same (m + k + n) inner operands and must
    // produce (m + k) outputs: the m loop outputs followed by k state operands.
    auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand);
        const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount);
        NN_RET_CHECK_GE(bodyModelOutputCount, outputCount);
        NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount);
        // Recover m (inputOutputCount), k (stateOnlyCount), and n
        // (inputOnlyCount) from the operation/body signatures.
        const uint32_t inputOutputCount = outputCount;
        const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount;
        const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount;
        // All (m + k + n) body inputs must match the outer operands fed to WHILE.
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        // The first m body outputs must match the WHILE operation's outputs.
        for (uint32_t i = 0; i < inputOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        // Each of the (m + k) carried operands must be re-consumable by the
        // body on the next iteration: body output i must match body input i.
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) {
            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            NN_RET_CHECK(compatible(inputOperand, outputOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
            << "Validation failed for WHILE condition model";
    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
            << "Validation failed for WHILE body model";
    return true;
}
822
823static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
824 const uint32_t* inputIndexes, uint32_t outputCount,
825 const uint32_t* outputIndexes,
826 const std::vector<hal::Operand>& operands,
827 HalVersion halVersion) {
828 if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
829 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
830 LOG(ERROR) << "This validateOperation() overload does not support control flow";
831 return ANEURALNETWORKS_BAD_DATA;
832 }
833 return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands,
834 halVersion, {});
835}
836
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100837int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
838 const uint32_t* inputIndexes, uint32_t outputCount,
839 const uint32_t* outputIndexes, const std::vector<Operand>& operands,
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +0000840 HalVersion halVersion, const SubgraphValidationHelper& helper) {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +0000841 NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
842 static_cast<uint32_t>(operands.size()),
843 "ANeuralNetworksModel_addOperation inputs"));
844 NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
845 static_cast<uint32_t>(operands.size()),
846 "ANeuralNetworksModel_addOperation outputs"));
847
848 if (isExtensionOperationType(opType)) {
849 if (halVersion < HalVersion::V1_2) {
850 LOG(ERROR)
851 << "Extension operations are supported since HAL version 1.2, validating using "
852 << toString(halVersion);
853 return ANEURALNETWORKS_BAD_DATA;
854 }
855 // There is no other validation we can do for an extension operation.
856 return ANEURALNETWORKS_NO_ERROR;
Miao Wang137d2782018-03-06 15:03:14 -0800857 }
858
859 auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000860 LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
861 << ") or output operands (" << outputCount << ", expected " << expOut
862 << ") for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800863 };
864
865 switch (opType) {
866 case ANEURALNETWORKS_OEM_OPERATION: {
867 return ANEURALNETWORKS_NO_ERROR;
868 }
Miao Wang137d2782018-03-06 15:03:14 -0800869 case ANEURALNETWORKS_RESHAPE: {
870 if (inputCount != 2 || outputCount != 1) {
871 logInvalidInOutNumber(2, 1);
872 return ANEURALNETWORKS_BAD_DATA;
873 }
874 auto inputType = operands[inputIndexes[0]].type;
875 std::vector<OperandType> inExpectedTypes;
876 std::vector<OperandType> outExpectedTypes;
877 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000878 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700879 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800880 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000881 } else if (inputType == OperandType::TENSOR_FLOAT16) {
882 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
883 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
884 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800885 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000886 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700887 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800888 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Lev Proleev5db57cf2019-11-25 15:11:31 +0000889 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
890 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
891 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
892 OperandType::TENSOR_INT32};
893 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800894 } else {
895 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000896 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800897 return ANEURALNETWORKS_BAD_DATA;
898 }
Lev Proleev9fa29bc2020-03-18 15:20:46 +0000899 const auto inputRank = operands[inputIndexes[0]].dimensions.size();
900 if (inputRank > 4) {
901 LOG(ERROR) << "Unsupported input tensor rank for operation "
902 << getOperationName(opType);
903 return ANEURALNETWORKS_BAD_DATA;
904 }
Michael Butler43953b82019-07-22 18:59:46 -0700905 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
906 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800907 outExpectedTypes);
908 }
Miao Wang137d2782018-03-06 15:03:14 -0800909 case ANEURALNETWORKS_DEPTH_TO_SPACE: {
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700910 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
911 LOG(ERROR) << "Invalid number of input operands (" << inputCount
912 << ", expected 3 or 2) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000913 << ", expected 1) for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800914 return ANEURALNETWORKS_BAD_DATA;
915 }
916 auto inputType = operands[inputIndexes[0]].type;
917 std::vector<OperandType> inExpectedTypes;
918 std::vector<OperandType> outExpectedTypes;
919 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000920 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700921 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800922 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000923 } else if (inputType == OperandType::TENSOR_FLOAT16) {
924 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
925 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
926 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800927 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000928 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700929 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800930 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Przemyslaw Szczepaniak8c915bb2019-11-22 14:59:43 +0000931 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
932 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
933 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
934 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800935 } else {
936 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000937 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800938 return ANEURALNETWORKS_BAD_DATA;
939 }
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700940 if (inputCount == 3) {
941 inExpectedTypes.push_back(OperandType::BOOL);
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000942 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100943 } else {
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000944 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700945 }
Michael Butler43953b82019-07-22 18:59:46 -0700946 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
947 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800948 outExpectedTypes);
949 }
950 case ANEURALNETWORKS_SPACE_TO_DEPTH: {
Xusong Wangbb83d492018-09-26 13:38:43 -0700951 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
952 LOG(ERROR) << "Invalid number of input operands (" << inputCount
953 << ", expected 3 or 2) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000954 << ", expected 1) for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800955 return ANEURALNETWORKS_BAD_DATA;
956 }
957 auto inputType = operands[inputIndexes[0]].type;
958 std::vector<OperandType> inExpectedTypes;
959 std::vector<OperandType> outExpectedTypes;
960 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000961 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700962 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800963 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000964 } else if (inputType == OperandType::TENSOR_FLOAT16) {
965 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
966 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
967 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800968 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000969 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700970 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800971 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Przemyslaw Szczepaniak8c915bb2019-11-22 14:59:43 +0000972 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
973 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
974 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
975 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800976 } else {
977 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000978 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800979 return ANEURALNETWORKS_BAD_DATA;
980 }
Xusong Wangbb83d492018-09-26 13:38:43 -0700981 if (inputCount == 3) {
982 inExpectedTypes.push_back(OperandType::BOOL);
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000983 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100984 } else {
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000985 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Xusong Wangbb83d492018-09-26 13:38:43 -0700986 }
Michael Butler43953b82019-07-22 18:59:46 -0700987 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
988 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800989 outExpectedTypes);
990 }
Miao Wang137d2782018-03-06 15:03:14 -0800991 case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
Michael Butlere538bb02018-03-26 14:24:49 -0700992 if (inputCount != 2 || outputCount != 1) {
993 logInvalidInOutNumber(2, 1);
994 return ANEURALNETWORKS_BAD_DATA;
995 }
996 auto inputType = operands[inputIndexes[1]].type;
Lev Proleev23776d52019-12-09 18:05:55 +0000997 if (inputType != OperandType::TENSOR_FLOAT16 &&
998 inputType != OperandType::TENSOR_FLOAT32 &&
Slava Shklyaevabe34fa2018-10-30 15:05:39 +0000999 inputType != OperandType::TENSOR_INT32 &&
Lev Proleev23776d52019-12-09 18:05:55 +00001000 inputType != OperandType::TENSOR_QUANT8_ASYMM &&
1001 inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001002 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001003 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001004 return ANEURALNETWORKS_BAD_DATA;
1005 }
Michael Butler43953b82019-07-22 18:59:46 -07001006 std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
Michael Butlere538bb02018-03-26 14:24:49 -07001007 std::vector<OperandType> outExpectedTypes = {inputType};
Lev Proleev23776d52019-12-09 18:05:55 +00001008 if (inputType == OperandType::TENSOR_FLOAT16 ||
1009 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1010 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1011 } else if (inputType == OperandType::TENSOR_INT32 ||
1012 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
1013 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1014 } else {
1015 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
1016 }
Michael Butler43953b82019-07-22 18:59:46 -07001017 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1018 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001019 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001020 }
1021 case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
Michael Butlere538bb02018-03-26 14:24:49 -07001022 if (inputCount != 3 || outputCount != 2) {
1023 logInvalidInOutNumber(3, 2);
1024 return ANEURALNETWORKS_BAD_DATA;
1025 }
1026 auto inputType = operands[inputIndexes[2]].type;
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001027 if (inputType != OperandType::TENSOR_FLOAT32 &&
1028 inputType != OperandType::TENSOR_INT32 &&
1029 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
1030 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001031 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001032 return ANEURALNETWORKS_BAD_DATA;
1033 }
Michael Butlere538bb02018-03-26 14:24:49 -07001034 std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
Michael Butler43953b82019-07-22 18:59:46 -07001035 OperandType::TENSOR_INT32, inputType};
Michael Butlere538bb02018-03-26 14:24:49 -07001036 std::vector<OperandType> outExpectedTypes = {inputType,
1037 OperandType::TENSOR_QUANT8_ASYMM};
Slava Shklyaev42cb8d72018-11-07 10:44:21 +00001038 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -07001039 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1040 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001041 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001042 }
1043 case ANEURALNETWORKS_LSH_PROJECTION: {
Michael Butlere538bb02018-03-26 14:24:49 -07001044 if (inputCount != 4 || outputCount != 1) {
1045 logInvalidInOutNumber(4, 1);
1046 return ANEURALNETWORKS_BAD_DATA;
1047 }
1048 auto inputType = operands[inputIndexes[1]].type;
Michael K. Sanders18493a62018-12-07 14:30:34 +00001049 if (inputType != OperandType::TENSOR_FLOAT16 &&
1050 inputType != OperandType::TENSOR_FLOAT32 &&
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001051 inputType != OperandType::TENSOR_INT32 &&
1052 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
1053 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001054 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001055 return ANEURALNETWORKS_BAD_DATA;
1056 }
Michael K. Sanders18493a62018-12-07 14:30:34 +00001057 auto hashType = operands[inputIndexes[0]].type;
1058 std::vector<OperandType> inExpectedTypes;
1059 if (hashType == OperandType::TENSOR_FLOAT16) {
1060 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1061 inExpectedTypes = {
1062 OperandType::TENSOR_FLOAT16,
1063 inputType,
1064 OperandType::TENSOR_FLOAT16,
1065 OperandType::INT32,
1066 };
1067 } else if (hashType == OperandType::TENSOR_FLOAT32) {
1068 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
1069 inExpectedTypes = {
1070 OperandType::TENSOR_FLOAT32,
1071 inputType,
1072 OperandType::TENSOR_FLOAT32,
1073 OperandType::INT32,
1074 };
1075 } else {
1076 LOG(ERROR) << "Unsupported hash tensor type for operation "
1077 << getOperationName(opType);
1078 return ANEURALNETWORKS_BAD_DATA;
1079 }
Michael Butlere538bb02018-03-26 14:24:49 -07001080 std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
Michael K. Sanders18493a62018-12-07 14:30:34 +00001081 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1082 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001083 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001084 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001085 case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
1086 std::vector<OperandType> inExpectedTypes;
1087 auto inputType = operands[inputIndexes[0]].type;
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001088 if (inputType != OperandType::TENSOR_FLOAT32 &&
1089 inputType != OperandType::TENSOR_FLOAT16) {
1090 LOG(ERROR) << "Unsupported input tensor type for operation "
1091 << getOperationName(opType);
1092 return ANEURALNETWORKS_BAD_DATA;
1093 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001094
1095 inExpectedTypes = {};
1096 for (int i = 0; i < 48; ++i) {
1097 inExpectedTypes.push_back(inputType);
1098 }
1099 inExpectedTypes.push_back(OperandType::INT32);
1100 inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
1101 ? OperandType::FLOAT32
1102 : OperandType::FLOAT16);
1103 inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
1104 ? OperandType::FLOAT32
1105 : OperandType::FLOAT16);
1106 inExpectedTypes.push_back(OperandType::BOOL);
1107 inExpectedTypes.push_back(OperandType::BOOL);
Viet Dang985c2142019-03-19 23:19:27 +00001108 for (int i = 0; i < 8; ++i) {
1109 inExpectedTypes.push_back(inputType);
1110 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001111
Lev Proleev42f87922020-01-14 15:40:39 +00001112 const uint32_t kNumOutputs = 2;
1113 const uint32_t kNumOutputsMerged = 1;
1114 const uint32_t kNumOutputsWithState = 6;
1115 const uint32_t kNumOutputsMergedWithState = 5;
1116
1117 if (inputCount != 61 ||
1118 (outputCount != kNumOutputs && outputCount != kNumOutputsMerged &&
1119 outputCount != kNumOutputsWithState &&
1120 outputCount != kNumOutputsMergedWithState)) {
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001121 LOG(ERROR) << "Invalid number of input operands (" << inputCount
Viet Dang4c927942019-03-27 12:00:18 +00001122 << ", expected 61) or output operands (" << outputCount
Lev Proleev42f87922020-01-14 15:40:39 +00001123 << ", expected 1, 2, 5 or 6) for operation " << getOperationName(opType);
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001124 return ANEURALNETWORKS_BAD_DATA;
1125 }
Lev Proleev42f87922020-01-14 15:40:39 +00001126 HalVersion minSupportedHalVersion = HalVersion::V1_2;
1127 if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) {
1128 minSupportedHalVersion = HalVersion::V1_3;
1129 }
1130 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion));
1131 std::vector<OperandType> outExpectedTypes(outputCount, inputType);
Viet Dang4c927942019-03-27 12:00:18 +00001132 auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
1133 inExpectedTypes, outputCount, outputIndexes,
1134 outExpectedTypes);
Viet Dang4c927942019-03-27 12:00:18 +00001135 return status;
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001136 }
        case ANEURALNETWORKS_LSTM: {
            // LSTM: all 20 data tensors must share the input's float type; the
            // cell-clip/proj-clip scalars must be the matching scalar float type.
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            // Inputs 0-19 are weight/state tensors of the input's type; input 20
            // is the activation enum (INT32).
            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                // FP16 LSTM (and its FLOAT16 clip scalars) was introduced in HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            // 23 inputs: original HAL 1.0 signature. 27 inputs: HAL 1.2 added the
            // four layer-normalization weight tensors.
            if (inputCount == 23 && outputCount == 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputCount == 27 && outputCount == 4) {
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            // Quantized LSTM has a fixed signature: 15 inputs (quant8 weights,
            // int32 biases, quant16 cell state) and 2 outputs. HAL 1.2 only.
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            // Outputs: new cell state (quant16 symm) and output activation (quant8).
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            // Inputs: logits tensor (fp16 or fp32), sample count (INT32),
            // 2-element seed tensor (TENSOR_INT32). Output: sampled class indices.
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                // Operation introduced in HAL 1.2 for both float variants.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RNN: {
            // Basic RNN: 5 data tensors plus the activation enum; outputs are the
            // new hidden state and the output tensor, in the input's float type.
            if (inputCount != 6 || outputCount != 2) {
                logInvalidInOutNumber(6, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                // FP32 RNN is part of the original HAL 1.0 surface.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_FLOAT32,
                };
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                // FP16 variant requires HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16, OperandType::INT32,
                };
                outExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_FLOAT16,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SVDF: {
            // SVDF: 5 float data tensors plus rank and activation scalars (INT32);
            // outputs are the updated state and the output tensor.
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                // FP32 SVDF exists since HAL 1.0; FP16 requires HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType,         inputType,         inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
            // Inputs: data tensor, block-size tensor (TENSOR_INT32), and an
            // optional trailing BOOL data-layout (NCHW) flag added in HAL 1.2.
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                // FP16 variant requires HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                // Signed quant8 was introduced in HAL 1.3.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The 3-input form (explicit layout flag) is HAL 1.2+; the 2-input
            // form is the original HAL 1.1 signature.
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
            // Inputs: data tensor, block-size tensor, paddings tensor (both
            // TENSOR_INT32), and an optional trailing BOOL layout flag (HAL 1.2+).
            if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 4 or 3) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                // FP16 variant requires HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                // HAL 1.1 implicitly padded with zeroPoint only when it was 0;
                // a nonzero zeroPoint pad value is only well-defined from HAL 1.2.
                if (operands[inputIndexes[0]].zeroPoint != 0) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                // Signed quant8 was introduced in HAL 1.3.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The 4-input form (explicit layout flag) is HAL 1.2+; the 3-input
            // form is the original HAL 1.1 signature.
            if (inputCount == 4) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD: {
            // PAD (zero padding): inputs are the data tensor and the paddings
            // tensor (TENSOR_INT32); output has the input's type.
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                // FP16 variant requires HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    // Signed quant8 was introduced in HAL 1.3.
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    // HAL 1.1 pads with the zeroPoint only when it is 0; a
                    // nonzero zeroPoint pad value requires HAL 1.2.
                    if (operands[inputIndexes[0]].zeroPoint == 0) {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_1));
                    } else {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_2));
                    }
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The reference implementation only supports tensors up to rank 4.
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD_V2: {
            // PAD_V2 (explicit pad value): inputs are the data tensor, the
            // paddings tensor (TENSOR_INT32), and a scalar pad value whose type
            // matches the tensor's element type.
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    // Signed quant8 was introduced in HAL 1.3.
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                };  // TODO(b/116699425): Make it UINT8.
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The reference implementation only supports tensors up to rank 4.
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_CAST: {
            // CAST: converts between element types. HAL 1.2 supports arbitrary
            // casts among the four listed types; HAL 1.3 additionally allows
            // identity casts (output type == input type) for the extra types.
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto outputType = operands[outputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if ((inputType == OperandType::TENSOR_FLOAT16 ||
                 inputType == OperandType::TENSOR_FLOAT32 ||
                 inputType == OperandType::TENSOR_INT32 ||
                 inputType == OperandType::TENSOR_QUANT8_ASYMM) &&
                (outputType == OperandType::TENSOR_FLOAT16 ||
                 outputType == OperandType::TENSOR_FLOAT32 ||
                 outputType == OperandType::TENSOR_INT32 ||
                 outputType == OperandType::TENSOR_QUANT8_ASYMM)) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {outputType};
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (inputType == OperandType::TENSOR_BOOL8 ||
                       inputType == OperandType::TENSOR_QUANT16_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT16_SYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                       inputType == OperandType::TENSOR_QUANT8_SYMM) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {inputType};  // Only identity CAST is supported.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MEAN: {
            // MEAN: inputs are the data tensor, the axes tensor (TENSOR_INT32),
            // and the keep-dims scalar (INT32); output has the input's type.
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The reference implementation only supports tensors up to rank 4.
            const auto inputRank = operands[inputIndexes[0]].dimensions.size();
            if (inputRank > 4) {
                LOG(ERROR) << "Unsupported input tensor rank for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            // Minimum HAL version depends on the element type: fp32/quant8 since
            // 1.1, fp16 since 1.2, signed quant8 since 1.3.
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes = {inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_ARGMAX:
        case ANEURALNETWORKS_ARGMIN: {
            // ARGMAX/ARGMIN: inputs are the data tensor and the axis scalar
            // (INT32); the output is always an index tensor (TENSOR_INT32).
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_INT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Operation introduced in HAL 1.2.
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_EXPAND_DIMS: {
            // EXPAND_DIMS: inserts a dimension of size 1 at the given axis.
            // Inputs: data tensor and axis scalar (INT32); output keeps the
            // input's element type.
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Signed quant8 requires HAL 1.3; all other types since HAL 1.2.
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPLIT: {
            // SPLIT: inputs are the data tensor, axis (INT32), and split count
            // (INT32); outputCount is variable — every output must have the
            // input's type. Note: only inputCount is validated here.
            if (inputCount != 3) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT16 &&
                inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_INT32 &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM &&
                inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Signed quant8 requires HAL 1.3; all other types since HAL 1.2.
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
                                                        OperandType::INT32};
            std::vector<OperandType> outExpectedTypes(outputCount, inputType);
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_MAXIMUM:
        case ANEURALNETWORKS_MINIMUM: {
            // Elementwise MAXIMUM/MINIMUM: both inputs and the output must share
            // one element type.
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, inputType};
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Signed quant8 requires HAL 1.3; all other types since HAL 1.2.
            if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
Xusong Wang85a0eb72018-08-17 15:38:32 -07001687 case ANEURALNETWORKS_GROUPED_CONV_2D: {
Xusong Wanga29428d2018-10-15 17:40:16 -07001688 if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
Xusong Wang85a0eb72018-08-17 15:38:32 -07001689 LOG(ERROR) << "Invalid number of input operands (" << inputCount
Xusong Wanga29428d2018-10-15 17:40:16 -07001690 << ", expected 12 or 9) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001691 << ", expected 1) for operation " << getOperationName(opType);
Xusong Wang85a0eb72018-08-17 15:38:32 -07001692 return ANEURALNETWORKS_BAD_DATA;
1693 }
1694 auto inputType = operands[inputIndexes[0]].type;
Przemyslaw Szczepaniak36298242018-12-28 11:52:32 +00001695 auto filterType = operands[inputIndexes[1]].type;
Xusong Wang85a0eb72018-08-17 15:38:32 -07001696 std::vector<OperandType> inExpectedTypes;
1697 std::vector<OperandType> outExpectedTypes;
1698 if (inputType == OperandType::TENSOR_FLOAT32) {
1699 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1700 OperandType::TENSOR_FLOAT32, OperandType::INT32,
1701 OperandType::INT32, OperandType::INT32,
1702 OperandType::INT32, OperandType::INT32};
1703 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Lev Proleeve5a548b2018-11-30 23:27:15 +00001704 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1705 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1706 OperandType::TENSOR_FLOAT16, OperandType::INT32,
1707 OperandType::INT32, OperandType::INT32,
1708 OperandType::INT32, OperandType::INT32};
1709 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001710 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1711 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1712 if (filterType != inputType &&
Przemyslaw Szczepaniak36298242018-12-28 11:52:32 +00001713 filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
1714 LOG(ERROR) << "Unsupported filter tensor type for operation "
1715 << getOperationName(opType);
1716 return ANEURALNETWORKS_BAD_DATA;
1717 }
1718
1719 if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
1720 operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
1721 LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
1722 << getOperationName(opType);
1723 return ANEURALNETWORKS_BAD_DATA;
1724 }
1725
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001726 inExpectedTypes = {
1727 inputType, filterType, OperandType::TENSOR_INT32,
1728 OperandType::INT32, OperandType::INT32, OperandType::INT32,
1729 OperandType::INT32, OperandType::INT32};
1730 outExpectedTypes = {inputType};
Xusong Wang85a0eb72018-08-17 15:38:32 -07001731 } else {
1732 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001733 << getOperationName(opType);
Xusong Wang85a0eb72018-08-17 15:38:32 -07001734 return ANEURALNETWORKS_BAD_DATA;
1735 }
1736
Xusong Wanga29428d2018-10-15 17:40:16 -07001737 if (inputCount == 12) {
Xusong Wang85a0eb72018-08-17 15:38:32 -07001738 std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
1739 inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
1740 explicitScalarTypes.end());
1741 }
Xusong Wanga29428d2018-10-15 17:40:16 -07001742 inExpectedTypes.push_back(OperandType::BOOL);
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001743 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1744 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1745 } else {
1746 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1747 }
Xusong Wang85a0eb72018-08-17 15:38:32 -07001748 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1749 inExpectedTypes, outputCount, outputIndexes,
1750 outExpectedTypes);
1751 }
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001752 case ANEURALNETWORKS_TILE: {
1753 if (inputCount != 2 || outputCount != 1) {
1754 logInvalidInOutNumber(2, 1);
1755 return ANEURALNETWORKS_BAD_DATA;
1756 }
1757 auto inputType = operands[inputIndexes[0]].type;
1758 std::vector<OperandType> inExpectedTypes;
1759 std::vector<OperandType> outExpectedTypes;
Slava Shklyaev815cd4e2018-11-05 11:25:38 +00001760 if (inputType == OperandType::TENSOR_FLOAT16 ||
1761 inputType == OperandType::TENSOR_FLOAT32 ||
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001762 inputType == OperandType::TENSOR_INT32 ||
Przemyslaw Szczepaniak54cab802019-11-21 19:37:50 +00001763 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1764 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001765 inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1766 outExpectedTypes = {inputType};
1767 } else {
1768 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001769 << getOperationName(opType);
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001770 return ANEURALNETWORKS_BAD_DATA;
1771 }
Przemyslaw Szczepaniak54cab802019-11-21 19:37:50 +00001772 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1773 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1774 } else {
1775 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1776 }
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001777 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1778 inExpectedTypes, outputCount, outputIndexes,
1779 outExpectedTypes);
1780 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001781 case ANEURALNETWORKS_POW: {
1782 if (inputCount != 2 || outputCount != 1) {
1783 logInvalidInOutNumber(2, 1);
1784 return ANEURALNETWORKS_BAD_DATA;
1785 }
Lev Proleev21fbf2b2018-12-23 16:54:26 +00001786 auto inputType = operands[inputIndexes[0]].type;
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001787 std::vector<OperandType> inExpectedTypes;
1788 std::vector<OperandType> outExpectedTypes;
Lev Proleev21fbf2b2018-12-23 16:54:26 +00001789 if (inputType == OperandType::TENSOR_FLOAT16 ||
1790 inputType == OperandType::TENSOR_FLOAT32) {
1791 inExpectedTypes = {inputType, inputType};
1792 outExpectedTypes = {inputType};
1793 } else {
1794 LOG(ERROR) << "Unsupported input tensor type for operation "
1795 << getOperationName(opType);
1796 return ANEURALNETWORKS_BAD_DATA;
1797 }
Przemyslaw Szczepaniak743b2152019-11-28 11:25:21 +00001798 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1799 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1800 } else {
1801 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1802 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001803 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1804 inExpectedTypes, outputCount, outputIndexes,
1805 outExpectedTypes);
1806 }
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +00001807 case ANEURALNETWORKS_IF: {
1808 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1809 return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1810 operands, helper)
1811 ? ANEURALNETWORKS_NO_ERROR
1812 : ANEURALNETWORKS_BAD_DATA;
1813 }
1814 case ANEURALNETWORKS_WHILE: {
1815 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1816 return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1817 operands, helper)
1818 ? ANEURALNETWORKS_NO_ERROR
1819 : ANEURALNETWORKS_BAD_DATA;
1820 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001821 default: {
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001822 const OperationRegistration* operationRegistration =
Slava Shklyaev44b23b42019-01-22 14:23:23 +00001823 BuiltinOperationResolver::get()->findOperation(
1824 static_cast<OperationType>(opType));
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001825 if (operationRegistration == nullptr) {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +00001826 if (0 <= opType && opType < kNumberOfOperationTypes) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001827 LOG(ERROR) << getOperationName(opType) << " not registered";
Przemyslaw Szczepaniakbb10cb42018-11-23 13:44:17 +00001828 } else {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +00001829 LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
1830 << kNumberOfOperationTypes << ")";
Przemyslaw Szczepaniakbb10cb42018-11-23 13:44:17 +00001831 }
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001832 return ANEURALNETWORKS_UNEXPECTED_NULL;
1833 }
1834 if (operationRegistration->validate == nullptr) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001835 LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001836 return ANEURALNETWORKS_UNEXPECTED_NULL;
1837 }
Slava Shklyaev432890e2019-09-30 16:04:43 +01001838 OperationValidationContext context(operationRegistration->name, inputCount,
1839 inputIndexes, outputCount, outputIndexes,
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001840 operands.data(), halVersion);
Slava Shklyaeve9b2e372019-04-23 10:49:53 +01001841 if (!operationRegistration->validate(&context)) {
1842 LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
1843 return ANEURALNETWORKS_BAD_DATA;
1844 }
1845 return ANEURALNETWORKS_NO_ERROR;
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001846 }
Miao Wang137d2782018-03-06 15:03:14 -08001847 }
1848}
1849
David Gross07ed4d52018-04-06 14:52:52 -07001850ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
1851 switch (resultCode) {
1852 case ANEURALNETWORKS_NO_ERROR:
1853 return ErrorStatus::NONE;
1854
1855 case ANEURALNETWORKS_BAD_DATA:
1856 case ANEURALNETWORKS_UNEXPECTED_NULL:
1857 return ErrorStatus::INVALID_ARGUMENT;
1858
Xusong Wange5be7ce2018-11-07 15:03:29 -08001859 case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
1860 return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
1861
Miao Wange5d644a2019-01-23 14:17:14 -08001862 case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
1863 return ErrorStatus::DEVICE_UNAVAILABLE;
1864
David Gross07ed4d52018-04-06 14:52:52 -07001865 case ANEURALNETWORKS_BAD_STATE:
1866 case ANEURALNETWORKS_INCOMPLETE:
1867 case ANEURALNETWORKS_OP_FAILED:
1868 case ANEURALNETWORKS_OUT_OF_MEMORY:
Miao Wang2dcdbd92018-04-23 10:36:24 -07001869 case ANEURALNETWORKS_UNMAPPABLE:
Michael Butlerf690d312019-12-12 16:25:03 -08001870 case ANEURALNETWORKS_DEAD_OBJECT:
David Gross07ed4d52018-04-06 14:52:52 -07001871 return ErrorStatus::GENERAL_FAILURE;
Michael Butlerf690d312019-12-12 16:25:03 -08001872
1873 case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
1874 return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
1875 case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
1876 return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
1877 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
1878 return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
1879 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
1880 return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
David Gross07ed4d52018-04-06 14:52:52 -07001881 }
Michael Butlerf690d312019-12-12 16:25:03 -08001882 LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE";
1883 return ErrorStatus::GENERAL_FAILURE;
David Gross07ed4d52018-04-06 14:52:52 -07001884}
1885
1886int convertErrorStatusToResultCode(ErrorStatus status) {
1887 switch (status) {
1888 case ErrorStatus::NONE:
1889 return ANEURALNETWORKS_NO_ERROR;
Miao Wange5d644a2019-01-23 14:17:14 -08001890 case ErrorStatus::DEVICE_UNAVAILABLE:
1891 return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
David Gross07ed4d52018-04-06 14:52:52 -07001892 case ErrorStatus::GENERAL_FAILURE:
David Gross07ed4d52018-04-06 14:52:52 -07001893 return ANEURALNETWORKS_OP_FAILED;
Michael Butlerf690d312019-12-12 16:25:03 -08001894 case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
1895 return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
1896 case ErrorStatus::INVALID_ARGUMENT:
1897 return ANEURALNETWORKS_BAD_DATA;
1898 case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
1899 return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
1900 case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
1901 return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
1902 case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
1903 return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
1904 case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
1905 return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
David Gross07ed4d52018-04-06 14:52:52 -07001906 }
Michael Butlerf690d312019-12-12 16:25:03 -08001907 LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
1908 << " mapped to ANEURALNETWORKS_OP_FAILED";
1909 return ANEURALNETWORKS_OP_FAILED;
David Gross07ed4d52018-04-06 14:52:52 -07001910}
1911
Michael Butler4fe318d2019-08-17 17:40:29 -07001912std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
1913 ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
1914 constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
1915 std::numeric_limits<uint64_t>::max()};
1916 const int n = convertErrorStatusToResultCode(status);
1917 if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
1918 !outputShapes.empty()) {
1919 LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
1920 outputShapes.clear();
1921 }
1922 if (status != ErrorStatus::NONE && timing != kNoTiming) {
1923 LOG(ERROR) << "The driver returned Timing when it shouldn't.";
1924 timing = kNoTiming;
1925 }
1926 return {n, std::move(outputShapes), timing};
1927}
1928
Xusong Wangc4b4cca2019-11-27 11:44:03 -08001929std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
1930 const std::vector<uint32_t>& rhs) {
1931 if (rhs.empty()) return lhs;
1932 if (lhs.empty()) return rhs;
1933 if (lhs.size() != rhs.size()) {
1934 LOG(ERROR) << "Incompatible ranks: " << toString(lhs) << " and " << toString(rhs);
1935 return std::nullopt;
1936 }
1937 std::vector<uint32_t> combined = lhs;
1938 for (uint32_t i = 0; i < lhs.size(); i++) {
1939 if (lhs[i] == 0) {
1940 combined[i] = rhs[i];
1941 } else if (rhs[i] != 0 && lhs[i] != rhs[i]) {
1942 LOG(ERROR) << "Incompatible dimensions: " << toString(lhs) << " and " << toString(rhs);
1943 return std::nullopt;
1944 }
1945 }
1946 return combined;
1947}
1948
Colin Crossbd7f9c42019-10-10 22:58:13 +00001949// Capabilities::operandPerformance utilities.
1950// The field Capabilities::operandPerformance is a vector sorted by the field
1951// Capabilities::OperandPerformance::type.
David Gross5dd79af2019-03-18 15:33:53 -07001952
// Builds an operandPerformance table in which every non-extension operand type
// reports the single performance value |perf|. SUBGRAPH is deliberately
// excluded: its performance is carried by Capabilities::ifPerformance /
// Capabilities::whilePerformance instead (see the SUBGRAPH CHECK in lookup()).
template <HalVersion version>
hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
        PerformanceInfo perf) {
    using OpPerf = VersionedOperandPerformance<version>;

    // Note: range presents enumerators in declaration order, not in numerical order.
    static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;

    std::vector<OpPerf> ret;
    ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
    for (VersionedOperandType<version> type : kOperandTypeRange) {
        if (static_cast<OperandType>(type) != OperandType::SUBGRAPH) {
            ret.push_back(OpPerf{type, perf});
        }
    }
    // Keep the table sorted by type: update() and lookup() below locate
    // entries with std::lower_bound (binary search) and depend on this order.
    std::sort(ret.begin(), ret.end(),
              [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });

    return ret;
}

// Explicit instantiations for the two HAL versions whose Capabilities carry an
// operandPerformance table.
template hal::hidl_vec<V1_2::Capabilities::OperandPerformance>
nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf);
template hal::hidl_vec<V1_3::Capabilities::OperandPerformance>
nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf);
1978
// Overwrites the performance entry for |type| in |*operandPerformance|.
// The entry is located by binary search, so the table must already be sorted
// by type (the order produced by nonExtensionOperandPerformance).
// CHECK-fails if |operandPerformance| is null or the search runs off the end
// of the table.
template <HalVersion version>
void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
            VersionedOperandType<version> type, hal::PerformanceInfo perf) {
    CHECK(operandPerformance != nullptr);
    const auto it =
            std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
                             [](const VersionedOperandPerformance<version>& perf,
                                VersionedOperandType<version> type) { return perf.type < type; });
    // NOTE(review): this only checks end(); it assumes |type| actually has an
    // entry — lower_bound hitting a larger type would not be caught here.
    CHECK(it != operandPerformance->end())
            << toString(type) << " not in " << toString(*operandPerformance);
    it->info = perf;
}
1991
// Non-template entry points for update(), dispatching to the versioned
// implementation above.
void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
            V1_2::OperandType type, PerformanceInfo perf) {
    update<HalVersion::V1_2>(operandPerformance, type, perf);
}
void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
            V1_3::OperandType type, PerformanceInfo perf) {
    update<HalVersion::V1_3>(operandPerformance, type, perf);
}
1999}
2000
// Retrieves the performance entry for |type| from a sorted operandPerformance
// table via binary search. Unlike update(), a miss past the end of the table
// is not fatal: it logs a warning and returns kNoPerformanceInfo.
template <HalVersion version>
PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
                       VersionedOperandType<version> type) {
    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
                                     [](const VersionedOperandPerformance<version>& perf,
                                        VersionedOperandType<version> type) {
                                         return static_cast<OperandType>(perf.type) <
                                                static_cast<OperandType>(type);
                                     });
    if (it == operandPerformance.end()) {
        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
        return kNoPerformanceInfo;
    } else {
        // NOTE(review): the matched entry's type is not verified to equal
        // |type| — an absent type that sorts before some entry returns that
        // entry's info. Confirm callers only pass types present in the table.
        return it->info;
    }
}
2017
// Non-template entry points for lookup().
PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
                       V1_2::OperandType type) {
    return lookup<HalVersion::V1_2>(operandPerformance, type);
}
PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
                       V1_3::OperandType type) {
    // SUBGRAPH has no entry in operandPerformance (it is skipped when the
    // table is built); its performance lives in dedicated Capabilities fields.
    CHECK(type != V1_3::OperandType::SUBGRAPH)
            << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
    return lookup<HalVersion::V1_3>(operandPerformance, type);
}
2028
Michael Butler75886e72018-01-23 11:05:43 -08002029// Versioning
2030
David Gross5dd79af2019-03-18 15:33:53 -07002031// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
2032// This array must be in sorted order.
2033static const OperandType kQuantized8PerformanceConsistentWithP[] = {
2034 OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
2035 OperandType::TENSOR_OEM_BYTE};
2036
2037static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
2038 const PerformanceInfo quantized8Performance =
Colin Crossbd7f9c42019-10-10 22:58:13 +00002039 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
2040 return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
2041 std::end(kQuantized8PerformanceConsistentWithP),
2042 [quantized8Performance, &capabilities](OperandType type) {
2043 return quantized8Performance ==
2044 lookup(capabilities.operandPerformance,
2045 static_cast<V1_2::OperandType>(type));
2046 });
2047}
2048
2049static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
2050 const PerformanceInfo quantized8Performance =
David Gross5dd79af2019-03-18 15:33:53 -07002051 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
2052 return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
2053 std::end(kQuantized8PerformanceConsistentWithP),
2054 [quantized8Performance, &capabilities](OperandType type) {
2055 return quantized8Performance ==
2056 lookup(capabilities.operandPerformance, type);
2057 });
2058}
2059
2060static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
2061 PerformanceInfo quantized8Performance) {
2062 hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
Colin Crossbd7f9c42019-10-10 22:58:13 +00002063 std::size(kQuantized8PerformanceConsistentWithP));
David Gross5dd79af2019-03-18 15:33:53 -07002064 std::transform(
2065 std::begin(kQuantized8PerformanceConsistentWithP),
2066 std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
2067 [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
Colin Crossbd7f9c42019-10-10 22:58:13 +00002068 return {static_cast<V1_2::OperandType>(type), quantized8Performance};
David Gross5dd79af2019-03-18 15:33:53 -07002069 });
2070 return ret;
2071}
2072
// compliantWithV1_0(Capabilities): true if the capabilities can be represented
// as V1_0::Capabilities without losing information, i.e. downcasting via
// convertToV1_0 would not collapse distinct performance values.

// V1_0 is trivially representable as itself.
bool compliantWithV1_0(const V1_0::Capabilities&) {
    return true;
}

// V1_0 has no relaxed-float field, so the relaxed performance must coincide
// with the float32 performance.
bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
}

// V1_0 keeps only a single float32 and a single quantized8 performance value,
// so all float32-related entries must agree, and the quantized8 entry must be
// consistent with the Android P grouping.
bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
    const PerformanceInfo perfTensorFloat32 =
            lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
    const PerformanceInfo perfFloat32 =
            lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
    if (perfTensorFloat32 != perfFloat32 ||
        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
        return false;
    }

    return isQuantized8PerformanceConsistentWithP(capabilities);
}

// Same conditions as the V1_2 overload, using the V1_3 operand types.
bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
    const PerformanceInfo perfTensorFloat32 =
            lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
    const PerformanceInfo perfFloat32 =
            lookup(capabilities.operandPerformance, OperandType::FLOAT32);
    if (perfTensorFloat32 != perfFloat32 ||
        perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
        perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
        return false;
    }

    return isQuantized8PerformanceConsistentWithP(capabilities);
}
2108
// compliantWithV1_1(Capabilities): true if the capabilities can be represented
// as V1_1::Capabilities without information loss.

// Older or same-version capabilities are always representable.
bool compliantWithV1_1(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Capabilities&) {
    return true;
}

// V1_1 has a single relaxed-float value (no scalar/tensor split) and a single
// float32 value, so those pairs must agree; quantized8 entries must follow
// the Android P grouping.
bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
        (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
         lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
        return false;
    }

    return isQuantized8PerformanceConsistentWithP(capabilities);
}

// Same conditions as the V1_2 overload, using the V1_3 operand types.
bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
    if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
         capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
        (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
         lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
        return false;
    }

    return isQuantized8PerformanceConsistentWithP(capabilities);
}
2138
// compliantWithV1_2(Capabilities): every visible version is representable as
// V1_2::Capabilities, since V1_2 is a superset of the older capability fields.
bool compliantWithV1_2(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_2::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_3::Capabilities&) {
    return true;
}
2154
// compliantWithV1_3(Capabilities): every version is representable as
// V1_3::Capabilities, the most recent (and most expressive) form.
bool compliantWithV1_3(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_2::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_3::Capabilities&) {
    return true;
}
2170
Michael Butlerf690d312019-12-12 16:25:03 -08002171V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
2172 return status;
2173}
2174
2175V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
2176 switch (status) {
2177 case V1_3::ErrorStatus::NONE:
2178 return V1_0::ErrorStatus::NONE;
2179 case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
2180 return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
2181 case V1_3::ErrorStatus::GENERAL_FAILURE:
2182 return V1_0::ErrorStatus::GENERAL_FAILURE;
2183 case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
2184 return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
2185 case V1_3::ErrorStatus::INVALID_ARGUMENT:
2186 return V1_0::ErrorStatus::INVALID_ARGUMENT;
2187 case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
2188 return V1_0::ErrorStatus::GENERAL_FAILURE;
2189 case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
2190 return V1_0::ErrorStatus::GENERAL_FAILURE;
2191 case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
2192 return V1_0::ErrorStatus::GENERAL_FAILURE;
2193 case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
2194 return V1_0::ErrorStatus::GENERAL_FAILURE;
2195 }
2196 LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
2197 return V1_0::ErrorStatus::GENERAL_FAILURE;
2198}
2199
// Upcasts a V1_0::ErrorStatus to V1_3 via a plain cast; this assumes the
// enumerators shared by the two versions keep the same underlying values
// (which the cast-based implementation relies on).
V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
    return static_cast<V1_3::ErrorStatus>(status);
}

// Identity overload.
V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
    return status;
}
2207
// OperationType conversions between HAL versions. All are plain enum casts:
// upcasts (convertToV1_*) are always value-preserving, while the "unchecked"
// downcasts perform no validation that the value actually exists in the older
// version — callers are expected to have established compliance beforehand.

static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
    return static_cast<V1_0::OperationType>(type);
}

static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
    return static_cast<V1_0::OperationType>(type);
}

V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
    return static_cast<V1_0::OperationType>(type);
}

static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
    return static_cast<V1_1::OperationType>(type);
}

static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
    return static_cast<V1_2::OperationType>(type);
}

static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
    return static_cast<V1_2::OperationType>(type);
}

V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
    return static_cast<V1_2::OperationType>(type);
}

static V1_3::OperationType convertToV1_3(V1_0::OperationType type) {
    return static_cast<V1_3::OperationType>(type);
}

static V1_3::OperationType convertToV1_3(V1_1::OperationType type) {
    return static_cast<V1_3::OperationType>(type);
}

static V1_3::OperationType convertToV1_3(V1_2::OperationType type) {
    return static_cast<V1_3::OperationType>(type);
}
2255
// convertToV1_0(Capabilities): downcasts newer capabilities to the V1_0 shape
// (just float32Performance + quantized8Performance). If the source is not
// V1_0-compliant the conversion is lossy; this is logged but not rejected.

V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
    return capabilities;
}

V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_1::Capabilities to V1_0::Capabilities";
    }
    // The relaxed-float performance field is dropped.
    return {.float32Performance = capabilities.float32Performance,
            .quantized8Performance = capabilities.quantized8Performance};
}

V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_2::Capabilities to V1_0::Capabilities";
    }
    // The representative entries are TENSOR_FLOAT32 and TENSOR_QUANT8_ASYMM;
    // all other per-type entries are dropped.
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
            .quantized8Performance = lookup(capabilities.operandPerformance,
                                            V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
}

V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
    if (!compliantWithV1_0(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_3::Capabilities to V1_0::Capabilities";
    }
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
            .quantized8Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
}
2290
// convertToV1_1(Capabilities): converts to the V1_1 shape (V1_0 fields plus a
// single relaxedFloat32toFloat16Performance value). Non-compliant sources are
// converted lossily with an error log.

V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
    // V1_0 has no relaxed-float value; mirror the float32 performance.
    return {.float32Performance = capabilities.float32Performance,
            .quantized8Performance = capabilities.quantized8Performance,
            .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
}

V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
    return capabilities;
}

V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
    if (!compliantWithV1_1(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_2::Capabilities to V1_1::Capabilities";
    }
    // The tensor relaxed-float value stands in for the single V1_1 field.
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
            .quantized8Performance =
                    lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM),
            .relaxedFloat32toFloat16Performance =
                    capabilities.relaxedFloat32toFloat16PerformanceTensor};
}

V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
    if (!compliantWithV1_1(capabilities)) {
        LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
                   << " from V1_3::Capabilities to V1_1::Capabilities";
    }
    return {.float32Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
            .quantized8Performance =
                    lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
            .relaxedFloat32toFloat16Performance =
                    capabilities.relaxedFloat32toFloat16PerformanceTensor};
}
2326
2327V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
2328 V1_2::Capabilities ret = {
2329 .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
2330 .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
2331 .operandPerformance =
2332 makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
2333 auto& opPerf = ret.operandPerformance;
2334 opPerf.resize(opPerf.size() + 2);
Colin Crossbd7f9c42019-10-10 22:58:13 +00002335 opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
2336 capabilities.float32Performance};
2337 opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
David Gross5dd79af2019-03-18 15:33:53 -07002338 using OperandPerformance = V1_2::Capabilities::OperandPerformance;
2339 std::sort(opPerf.begin(), opPerf.end(),
2340 [](const OperandPerformance& a, const OperandPerformance& b) {
2341 return a.type < b.type;
2342 });
2343 return ret;
2344}
2345
// Upcasts 1.1 capabilities to 1.2: the single relaxed-performance value feeds
// both new scalar/tensor fields, and float32 entries are appended to the
// per-type performance table, which is then re-sorted by type.
V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
    V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
                                      capabilities.relaxedFloat32toFloat16Performance,
                              .relaxedFloat32toFloat16PerformanceTensor =
                                      capabilities.relaxedFloat32toFloat16Performance,
                              .operandPerformance = makeQuantized8PerformanceConsistentWithP(
                                      capabilities.quantized8Performance)};
    auto& opPerf = ret.operandPerformance;
    opPerf.resize(opPerf.size() + 2);
    opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
                                 capabilities.float32Performance};
    opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
    using OperandPerformance = V1_2::Capabilities::OperandPerformance;
    std::sort(opPerf.begin(), opPerf.end(),
              [](const OperandPerformance& a, const OperandPerformance& b) {
                  return a.type < b.type;
              });
    return ret;
}

// Identity conversion: already a 1.2 capabilities struct.
V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
    return capabilities;
}

// Downcasts 1.3 capabilities to 1.2 by dropping performance entries for
// operand types that do not exist in 1.2 and narrowing the rest.
V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
    V1_2::Capabilities ret = {
            .relaxedFloat32toFloat16PerformanceScalar =
                    capabilities.relaxedFloat32toFloat16PerformanceScalar,
            .relaxedFloat32toFloat16PerformanceTensor =
                    capabilities.relaxedFloat32toFloat16PerformanceTensor,
    };
    // Keep only entries whose operand type is valid in 1.2.
    const auto& inputOpPerf = capabilities.operandPerformance;
    hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
    opPerfSupported.resize(inputOpPerf.size());
    auto last =
            std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
                         [](V1_3::Capabilities::OperandPerformance opPerf) {
                             return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
                         });
    opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));

    // Narrow the surviving entries to the 1.2 representation.
    auto& convertedOpPerf = ret.operandPerformance;
    convertedOpPerf.resize(opPerfSupported.size());
    std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
                   [](V1_3::Capabilities::OperandPerformance opPerf) {
                       return V1_2::Capabilities::OperandPerformance{
                               static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
                   });
    return ret;
}

// Upcasts 1.0 capabilities to 1.3 by chaining through 1.2.
V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
    return convertToV1_3(convertToV1_2(capabilities));
}

// Upcasts 1.1 capabilities to 1.3 by chaining through 1.2.
V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
    return convertToV1_3(convertToV1_2(capabilities));
}

// Upcasts 1.2 capabilities to 1.3. The new control-flow performance fields
// (IF/WHILE) get kNoPerformanceInfo since a 1.2 driver reports no such data.
V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
    V1_3::Capabilities ret = {
            .relaxedFloat32toFloat16PerformanceScalar =
                    capabilities.relaxedFloat32toFloat16PerformanceScalar,
            .relaxedFloat32toFloat16PerformanceTensor =
                    capabilities.relaxedFloat32toFloat16PerformanceTensor,
            .ifPerformance = kNoPerformanceInfo,
            .whilePerformance = kNoPerformanceInfo,
    };
    auto& opPerf = ret.operandPerformance;
    opPerf.resize(capabilities.operandPerformance.size());
    std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
                   opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
                       return V1_3::Capabilities::OperandPerformance{
                               static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
                   });
    return ret;
}

// Identity conversion: already a 1.3 capabilities struct.
V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
    return capabilities;
}
2427
Slava Shklyaeva5055742018-10-15 14:58:25 +01002428static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
2429 return {.type = uncheckedConvertToV1_0(operation.type),
Michael Butler75886e72018-01-23 11:05:43 -08002430 .inputs = operation.inputs,
2431 .outputs = operation.outputs};
2432}
2433
Slava Shklyaeva5055742018-10-15 14:58:25 +01002434static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
Michael Butler75886e72018-01-23 11:05:43 -08002435 return {.type = convertToV1_1(operation.type),
2436 .inputs = operation.inputs,
2437 .outputs = operation.outputs};
2438}
2439
Slava Shklyaeva5055742018-10-15 14:58:25 +01002440static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2441 const hidl_vec<V1_1::Operation>& operations) {
Michael Butler75886e72018-01-23 11:05:43 -08002442 hidl_vec<V1_0::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002443 std::transform(
2444 operations.begin(), operations.end(), result.begin(),
2445 [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
Michael Butler75886e72018-01-23 11:05:43 -08002446 return result;
2447}
2448
2449static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
2450 hidl_vec<V1_1::Operation> result(operations.size());
2451 std::transform(operations.begin(), operations.end(), result.begin(),
2452 [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
2453 return result;
2454}
2455
Colin Crossbd7f9c42019-10-10 22:58:13 +00002456bool compliantWithV1_0(const V1_3::Operand& operand) {
Xusong Wangc5978942019-05-08 18:40:42 -07002457 return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
2458 (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002459 operand.dimensions.size() != 0) &&
2460 compliantWithV1_0(operand.lifetime);
Xusong Wangc5978942019-05-08 18:40:42 -07002461}
2462
Colin Crossbd7f9c42019-10-10 22:58:13 +00002463bool compliantWithV1_2(const V1_3::Operand& operand) {
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002464 return validOperandType(static_cast<V1_2::OperandType>(operand.type)) &&
2465 compliantWithV1_0(operand.lifetime);
Michael Butler75886e72018-01-23 11:05:43 -08002466}
2467
Colin Crossbd7f9c42019-10-10 22:58:13 +00002468bool compliantWithV1_3(const V1_3::Operand& operand) {
2469 return true;
Michael Butler75886e72018-01-23 11:05:43 -08002470}
2471
Colin Crossbd7f9c42019-10-10 22:58:13 +00002472static bool compliantWith(HalVersion version, const V1_3::Model& model,
David Grosse5a8e1e2019-05-09 15:55:53 -07002473 std::set<uint32_t>* noncompliantOperations) {
Xusong Wangc5978942019-05-08 18:40:42 -07002474 // A boolean vector indicating whether each pool is compliant with the target HAL version.
2475 std::vector<bool> isPoolCompliant(model.pools.size(), false);
2476 std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
2477 [version](const hidl_memory& pool) { return validatePool(pool, version); });
2478
2479 // A boolean vector indicating whether each operand is compliant with the target HAL version.
Slava Shklyaev8de7a222019-12-13 18:05:41 +00002480 std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
2481 std::transform(model.main.operands.begin(), model.main.operands.end(),
2482 isOperandCompliant.begin(), [&isPoolCompliant, version](const Operand& op) {
Colin Crossbd7f9c42019-10-10 22:58:13 +00002483 bool is_operand_compliant = false;
2484 switch (version) {
2485 case HalVersion::UNKNOWN:
2486 is_operand_compliant = false;
2487 break;
2488 case HalVersion::V1_0:
2489 is_operand_compliant = compliantWithV1_0(op);
2490 break;
2491 case HalVersion::V1_1:
2492 // There is no V1_1::Operand -- both V1_0::Model
2493 // and V1_1::Model use V1_0::Operand.
2494 is_operand_compliant = compliantWithV1_0(op);
2495 break;
2496 case HalVersion::V1_2:
2497 is_operand_compliant = compliantWithV1_2(op);
2498 break;
2499 case HalVersion::V1_3:
2500 is_operand_compliant = compliantWithV1_3(op);
2501 break;
2502 }
2503 return is_operand_compliant &&
Xusong Wangc5978942019-05-08 18:40:42 -07002504 !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
2505 !isPoolCompliant[op.location.poolIndex]);
2506 });
2507
2508 auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
2509 return std::all_of(
2510 indices.begin(), indices.end(),
2511 [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
2512 };
2513
Colin Crossbd7f9c42019-10-10 22:58:13 +00002514 auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) {
Xusong Wangc5978942019-05-08 18:40:42 -07002515 if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
David Grosse5a8e1e2019-05-09 15:55:53 -07002516 int error = validateOperation(
2517 static_cast<int32_t>(op.type), op.inputs.size(),
2518 op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
Slava Shklyaev8de7a222019-12-13 18:05:41 +00002519 op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.main.operands, version);
David Grosse5a8e1e2019-05-09 15:55:53 -07002520 return error == ANEURALNETWORKS_NO_ERROR;
2521 };
2522
2523 if (noncompliantOperations) {
2524 CHECK(noncompliantOperations->empty());
Slava Shklyaev8de7a222019-12-13 18:05:41 +00002525 for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) {
2526 if (!localValidateOperation(model.main.operations[idx])) {
David Grosse5a8e1e2019-05-09 15:55:53 -07002527 noncompliantOperations->insert(idx);
2528 }
2529 }
2530 return noncompliantOperations->empty();
2531 } else {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00002532 return std::all_of(model.main.operations.begin(), model.main.operations.end(),
David Grosse5a8e1e2019-05-09 15:55:53 -07002533 localValidateOperation);
2534 }
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002535}
2536
Colin Crossbd7f9c42019-10-10 22:58:13 +00002537bool compliantWithV1_0(const V1_0::Model& model) {
2538 return true;
2539}
2540
2541bool compliantWithV1_0(const V1_1::Model& model) {
2542 // In addition to new enumeration values being introduced in V1_1::Model, a
2543 // new flag was introduced to indicate whether or not float32 data can be
2544 // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
2545 // flag is not relevant in whether a V1_1::Model is compliant with a
2546 // V1_0::Model because all 1.0 drivers require strict calculation by default
2547 // in the P NN runtime. Even if fp16 calculations are allowed, they can
2548 // still be computed by a strict fp32 driver.
2549 return std::all_of(
2550 model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
2551 int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
2552 op.inputs.size() > 0 ? op.inputs.data() : nullptr,
2553 op.outputs.size(),
2554 op.outputs.size() > 0 ? op.outputs.data() : nullptr,
2555 convertToV1_3(model.operands), HalVersion::V1_0);
2556 return error == ANEURALNETWORKS_NO_ERROR;
2557 });
2558}
2559
David Grosse5a8e1e2019-05-09 15:55:53 -07002560bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
Colin Crossbd7f9c42019-10-10 22:58:13 +00002561 return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
2562}
2563
2564bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
David Grosse5a8e1e2019-05-09 15:55:53 -07002565 return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002566}
2567
Colin Crossbd7f9c42019-10-10 22:58:13 +00002568bool compliantWithV1_1(const V1_0::Model&) {
2569 return true;
2570}
2571
2572bool compliantWithV1_1(const V1_1::Model&) {
2573 return true;
2574}
2575
David Grosse5a8e1e2019-05-09 15:55:53 -07002576bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
Colin Crossbd7f9c42019-10-10 22:58:13 +00002577 return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
2578}
2579
2580bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
David Grosse5a8e1e2019-05-09 15:55:53 -07002581 return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
2582}
2583
Colin Crossbd7f9c42019-10-10 22:58:13 +00002584bool compliantWithV1_2(const V1_0::Model&) {
2585 return true;
2586}
2587
2588bool compliantWithV1_2(const V1_1::Model&) {
2589 return true;
2590}
2591
2592bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
2593 return true;
2594}
2595
2596bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
2597 return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
2598}
2599
Slava Shklyaeva5055742018-10-15 14:58:25 +01002600static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
2601 return {.type = uncheckedConvertToV1_0(operation.type),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002602 .inputs = operation.inputs,
2603 .outputs = operation.outputs};
2604}
2605
Colin Crossbd7f9c42019-10-10 22:58:13 +00002606static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
2607 return {.type = uncheckedConvertToV1_0(operation.type),
2608 .inputs = operation.inputs,
2609 .outputs = operation.outputs};
2610}
2611
Slava Shklyaeva5055742018-10-15 14:58:25 +01002612static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
2613 return {.type = uncheckedConvertToV1_1(operation.type),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002614 .inputs = operation.inputs,
2615 .outputs = operation.outputs};
2616}
2617
Colin Crossbd7f9c42019-10-10 22:58:13 +00002618static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
2619 return {.type = uncheckedConvertToV1_1(operation.type),
2620 .inputs = operation.inputs,
2621 .outputs = operation.outputs};
2622}
2623
Slava Shklyaeva5055742018-10-15 14:58:25 +01002624static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002625 return {.type = convertToV1_2(operation.type),
2626 .inputs = operation.inputs,
2627 .outputs = operation.outputs};
2628}
2629
Slava Shklyaeva5055742018-10-15 14:58:25 +01002630static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
2631 return {.type = convertToV1_2(operation.type),
2632 .inputs = operation.inputs,
2633 .outputs = operation.outputs};
2634}
2635
Colin Crossbd7f9c42019-10-10 22:58:13 +00002636static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002637 return {.type = uncheckedConvertToV1_2(operation.type),
2638 .inputs = operation.inputs,
2639 .outputs = operation.outputs};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002640}
2641
2642static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002643 return {.type = convertToV1_3(operation.type),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002644 .inputs = operation.inputs,
2645 .outputs = operation.outputs};
2646}
2647
2648static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002649 return {.type = convertToV1_3(operation.type),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002650 .inputs = operation.inputs,
2651 .outputs = operation.outputs};
2652}
2653
2654static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002655 return {.type = convertToV1_3(operation.type),
2656 .inputs = operation.inputs,
2657 .outputs = operation.outputs};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002658}
2659
2660static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2661 const hidl_vec<V1_3::Operation>& operations) {
2662 hidl_vec<V1_0::Operation> result(operations.size());
2663 std::transform(
2664 operations.begin(), operations.end(), result.begin(),
2665 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
2666 return result;
2667}
2668
Slava Shklyaeva5055742018-10-15 14:58:25 +01002669static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2670 const hidl_vec<V1_2::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002671 hidl_vec<V1_0::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002672 std::transform(
2673 operations.begin(), operations.end(), result.begin(),
2674 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002675 return result;
2676}
2677
Colin Crossbd7f9c42019-10-10 22:58:13 +00002678static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
2679 const hidl_vec<V1_3::Operation>& operations) {
2680 hidl_vec<V1_2::Operation> result(operations.size());
2681 std::transform(
2682 operations.begin(), operations.end(), result.begin(),
2683 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
2684 return result;
2685}
2686
Slava Shklyaeva5055742018-10-15 14:58:25 +01002687static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
2688 const hidl_vec<V1_2::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002689 hidl_vec<V1_1::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002690 std::transform(
2691 operations.begin(), operations.end(), result.begin(),
2692 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002693 return result;
2694}
2695
Colin Crossbd7f9c42019-10-10 22:58:13 +00002696static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
2697 const hidl_vec<V1_3::Operation>& operations) {
2698 hidl_vec<V1_1::Operation> result(operations.size());
2699 std::transform(
2700 operations.begin(), operations.end(), result.begin(),
2701 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
2702 return result;
2703}
2704
Slava Shklyaeva5055742018-10-15 14:58:25 +01002705static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002706 hidl_vec<V1_2::Operation> result(operations.size());
2707 std::transform(operations.begin(), operations.end(), result.begin(),
Slava Shklyaeva5055742018-10-15 14:58:25 +01002708 [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
2709 return result;
2710}
2711
2712static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
2713 hidl_vec<V1_2::Operation> result(operations.size());
2714 std::transform(operations.begin(), operations.end(), result.begin(),
2715 [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002716 return result;
2717}
2718
Colin Crossbd7f9c42019-10-10 22:58:13 +00002719static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) {
2720 hidl_vec<V1_3::Operation> result(operations.size());
2721 std::transform(operations.begin(), operations.end(), result.begin(),
2722 [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
2723 return result;
2724}
2725
2726static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) {
2727 hidl_vec<V1_3::Operation> result(operations.size());
2728 std::transform(operations.begin(), operations.end(), result.begin(),
2729 [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
2730 return result;
2731}
2732
2733static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) {
2734 hidl_vec<V1_3::Operation> result(operations.size());
2735 std::transform(operations.begin(), operations.end(), result.begin(),
2736 [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
2737 return result;
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002738}
2739
Slava Shklyaeva5055742018-10-15 14:58:25 +01002740static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002741 return validOperandType(static_cast<V1_0::OperandType>(operandType));
2742}
2743
Colin Crossbd7f9c42019-10-10 22:58:13 +00002744static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
2745 return validOperandType(static_cast<V1_0::OperandType>(operandType));
2746}
2747
2748static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
2749 return validOperandType(static_cast<V1_2::OperandType>(operandType));
2750}
2751
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002752V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
2753 if (!compliantWithV1_0(operandType)) {
2754 LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
Colin Crossbd7f9c42019-10-10 22:58:13 +00002755 << " from V1_2::OperandType to V1_0::OperandType";
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002756 }
2757 return static_cast<V1_0::OperandType>(operandType);
2758}
2759
Colin Crossbd7f9c42019-10-10 22:58:13 +00002760V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
2761 return static_cast<V1_2::OperandType>(operandType);
2762}
2763
2764V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
2765 if (!compliantWithV1_2(operandType)) {
2766 LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
2767 << " from V1_3::OperandType to V1_2::OperandType";
2768 }
2769 return static_cast<V1_2::OperandType>(operandType);
2770}
2771
2772V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
2773 if (!compliantWithV1_0(operandType)) {
2774 LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
2775 << " from V1_3::Operand to V1_0::Operand";
2776 }
2777 return static_cast<V1_0::OperandType>(operandType);
2778}
2779
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002780bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) {
2781 return true;
2782}
2783
2784bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime) {
2785 return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
2786}
2787
2788bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime) {
2789 return true;
2790}
2791
2792bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime) {
2793 return true;
2794}
2795
2796V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
2797 return lifetime;
2798}
2799
2800V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
2801 if (!compliantWithV1_0(lifetime)) {
2802 LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
2803 << " from V1_3 to V1_0";
2804 }
2805 return static_cast<V1_0::OperandLifeTime>(lifetime);
2806}
2807
2808V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
2809 return static_cast<V1_3::OperandLifeTime>(lifetime);
2810}
2811
2812V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
2813 return lifetime;
2814}
2815
Colin Crossbd7f9c42019-10-10 22:58:13 +00002816V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
2817 return {.type = convertToV1_0(operand.type),
2818 .dimensions = operand.dimensions,
2819 .numberOfConsumers = operand.numberOfConsumers,
2820 .scale = operand.scale,
2821 .zeroPoint = operand.zeroPoint,
2822 .lifetime = convertToV1_0(operand.lifetime),
2823 .location = operand.location};
2824}
2825
2826V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
2827 return {.type = convertToV1_0(operand.type),
2828 .dimensions = operand.dimensions,
2829 .numberOfConsumers = operand.numberOfConsumers,
2830 .scale = operand.scale,
2831 .zeroPoint = operand.zeroPoint,
2832 .lifetime = convertToV1_0(operand.lifetime),
2833 .location = operand.location};
2834}
2835
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002836V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
2837 return {.type = convertToV1_2(operand.type),
2838 .dimensions = operand.dimensions,
2839 .numberOfConsumers = operand.numberOfConsumers,
2840 .scale = operand.scale,
2841 .zeroPoint = operand.zeroPoint,
2842 .lifetime = operand.lifetime,
2843 .location = operand.location};
2844}
2845
Colin Crossbd7f9c42019-10-10 22:58:13 +00002846V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
2847 return {.type = convertToV1_2(operand.type),
2848 .dimensions = operand.dimensions,
2849 .numberOfConsumers = operand.numberOfConsumers,
2850 .scale = operand.scale,
2851 .zeroPoint = operand.zeroPoint,
2852 .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
2853 .location = operand.location,
Michael Butler2d3826e2020-02-04 16:08:11 -08002854 .extraParams = operand.extraParams};
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002855}
2856
Colin Crossbd7f9c42019-10-10 22:58:13 +00002857V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
2858 return {.type = static_cast<V1_3::OperandType>(operand.type),
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002859 .dimensions = operand.dimensions,
2860 .numberOfConsumers = operand.numberOfConsumers,
2861 .scale = operand.scale,
2862 .zeroPoint = operand.zeroPoint,
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002863 .lifetime = convertToV1_3(operand.lifetime),
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002864 .location = operand.location};
2865}
2866
Colin Crossbd7f9c42019-10-10 22:58:13 +00002867V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
2868 return {.type = static_cast<V1_3::OperandType>(operand.type),
2869 .dimensions = operand.dimensions,
2870 .numberOfConsumers = operand.numberOfConsumers,
2871 .scale = operand.scale,
2872 .zeroPoint = operand.zeroPoint,
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002873 .lifetime = convertToV1_3(operand.lifetime),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002874 .location = operand.location,
Michael Butler2d3826e2020-02-04 16:08:11 -08002875 .extraParams = operand.extraParams};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002876}
2877
2878V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
2879 return operand;
2880}
2881
2882hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) {
2883 return operands;
2884}
2885
2886hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
2887 hidl_vec<V1_0::Operand> result(operands.size());
2888 std::transform(operands.begin(), operands.end(), result.begin(),
2889 [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
2890 return result;
2891}
2892
2893hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) {
2894 hidl_vec<V1_0::Operand> result(operands.size());
2895 std::transform(operands.begin(), operands.end(), result.begin(),
2896 [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
2897 return result;
2898}
2899
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002900hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
2901 hidl_vec<V1_2::Operand> result(operands.size());
2902 std::transform(operands.begin(), operands.end(), result.begin(),
2903 [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
2904 return result;
2905}
2906
2907hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
2908 return operands;
2909}
2910
Colin Crossbd7f9c42019-10-10 22:58:13 +00002911hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) {
2912 hidl_vec<V1_2::Operand> result(operands.size());
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002913 std::transform(operands.begin(), operands.end(), result.begin(),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002914 [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002915 return result;
2916}
2917
Colin Crossbd7f9c42019-10-10 22:58:13 +00002918hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) {
2919 hidl_vec<V1_3::Operand> result(operands.size());
2920 std::transform(operands.begin(), operands.end(), result.begin(),
2921 [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
2922 return result;
2923}
2924
2925hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) {
2926 hidl_vec<V1_3::Operand> result(operands.size());
2927 std::transform(operands.begin(), operands.end(), result.begin(),
2928 [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
2929 return result;
2930}
2931
2932hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) {
2933 return operands;
2934}
2935
// Model downcasts to 1.0. Non-compliant models are logged but converted
// anyway (the unchecked operation converters are used after the compliance
// check has reported the problem).

V1_0::Model convertToV1_0(const V1_0::Model& model) {
    return model;
}

V1_0::Model convertToV1_0(const V1_1::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_1::Model to V1_0::Model";
    }
    // The relaxComputationFloat32toFloat16 flag is dropped; 1.0 has no
    // equivalent field.
    return {.operands = model.operands,
            .operations = uncheckedConvertToV1_0(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}

V1_0::Model convertToV1_0(const V1_2::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_2::Model to V1_0::Model";
    }
    return {.operands = convertToV1_0(model.operands),
            .operations = uncheckedConvertToV1_0(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}

// For 1.3 models only the main subgraph is converted; referenced subgraphs
// have no 1.0 representation.
V1_0::Model convertToV1_0(const V1_3::Model& model) {
    if (!compliantWithV1_0(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_3::Model to V1_0::Model";
    }
    return {.operands = convertToV1_0(model.main.operands),
            .operations = uncheckedConvertToV1_0(model.main.operations),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools};
}
2978
// Model conversions to 1.1.

// Upcast: a 1.0 model never requested relaxed computation, so the new flag is
// initialized to false.
V1_1::Model convertToV1_1(const V1_0::Model& model) {
    return {.operands = model.operands,
            .operations = convertToV1_1(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = false};
}

V1_1::Model convertToV1_1(const V1_1::Model& model) {
    return model;
}

// Downcasts log non-compliance but convert anyway, using the unchecked
// operation converters.
V1_1::Model convertToV1_1(const V1_2::Model& model) {
    if (!compliantWithV1_1(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_2::Model to V1_1::Model";
    }
    return {.operands = convertToV1_0(model.operands),  // Operands in 1.1 and 1.0 are identical.
            .operations = uncheckedConvertToV1_1(model.operations),
            .inputIndexes = model.inputIndexes,
            .outputIndexes = model.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
}

// For 1.3 models only the main subgraph is converted; referenced subgraphs
// have no 1.1 representation.
V1_1::Model convertToV1_1(const V1_3::Model& model) {
    if (!compliantWithV1_1(model)) {
        LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
                   << " from V1_3::Model to V1_1::Model";
    }
    return {// Operands in 1.1 and 1.0 are identical.
            .operands = convertToV1_0(model.main.operands),
            .operations = uncheckedConvertToV1_1(model.main.operations),
            .inputIndexes = model.main.inputIndexes,
            .outputIndexes = model.main.outputIndexes,
            .operandValues = model.operandValues,
            .pools = model.pools,
            .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
}
3021
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003022V1_2::Model convertToV1_2(const V1_0::Model& model) {
Lev Proleevfeafd0b2018-10-02 14:15:58 +01003023 return {.operands = convertToV1_2(model.operands),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003024 .operations = convertToV1_2(model.operations),
3025 .inputIndexes = model.inputIndexes,
3026 .outputIndexes = model.outputIndexes,
3027 .operandValues = model.operandValues,
3028 .pools = model.pools,
3029 .relaxComputationFloat32toFloat16 = false};
3030}
3031
3032V1_2::Model convertToV1_2(const V1_1::Model& model) {
Lev Proleevfeafd0b2018-10-02 14:15:58 +01003033 return {.operands = convertToV1_2(model.operands),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003034 .operations = convertToV1_2(model.operations),
3035 .inputIndexes = model.inputIndexes,
3036 .outputIndexes = model.outputIndexes,
3037 .operandValues = model.operandValues,
3038 .pools = model.pools,
3039 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3040}
3041
// Identity conversion: the model is already in the 1.2 schema.
V1_2::Model convertToV1_2(const V1_2::Model& model) {
    return model;
}
3045
Colin Crossbd7f9c42019-10-10 22:58:13 +00003046V1_2::Model convertToV1_2(const V1_3::Model& model) {
3047 if (!compliantWithV1_2(model)) {
3048 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
3049 << " from V1_3::Model to V1_2::Model";
3050 }
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003051 return {.operands = convertToV1_2(model.main.operands),
3052 .operations = uncheckedConvertToV1_2(model.main.operations),
3053 .inputIndexes = model.main.inputIndexes,
3054 .outputIndexes = model.main.outputIndexes,
Colin Crossbd7f9c42019-10-10 22:58:13 +00003055 .operandValues = model.operandValues,
3056 .pools = model.pools,
3057 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
Slava Shklyaev57941102019-12-13 17:02:29 +00003058 .extensionNameToPrefix = model.extensionNameToPrefix};
Colin Crossbd7f9c42019-10-10 22:58:13 +00003059}
3060
3061V1_3::Model convertToV1_3(const V1_0::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003062 return {.main = {.operands = convertToV1_3(model.operands),
3063 .operations = convertToV1_3(model.operations),
3064 .inputIndexes = model.inputIndexes,
3065 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003066 .operandValues = model.operandValues,
3067 .pools = model.pools,
3068 .relaxComputationFloat32toFloat16 = false};
3069}
3070
3071V1_3::Model convertToV1_3(const V1_1::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003072 return {.main = {.operands = convertToV1_3(model.operands),
3073 .operations = convertToV1_3(model.operations),
3074 .inputIndexes = model.inputIndexes,
3075 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003076 .operandValues = model.operandValues,
3077 .pools = model.pools,
3078 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3079}
3080
3081V1_3::Model convertToV1_3(const V1_2::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003082 return {.main = {.operands = convertToV1_3(model.operands),
3083 .operations = convertToV1_3(model.operations),
3084 .inputIndexes = model.inputIndexes,
3085 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003086 .operandValues = model.operandValues,
3087 .pools = model.pools,
3088 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
Slava Shklyaev57941102019-12-13 17:02:29 +00003089 .extensionNameToPrefix = model.extensionNameToPrefix};
Colin Crossbd7f9c42019-10-10 22:58:13 +00003090}
3091
// Identity conversion: the model is already in the 1.3 schema.
V1_3::Model convertToV1_3(const V1_3::Model& model) {
    return model;
}
3095
// A V1_0::Request is trivially compliant with version 1.0.
bool compliantWithV1_0(const V1_0::Request& request) {
    return true;
}
3099
3100bool compliantWithV1_0(const V1_3::Request& request) {
3101 return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
Xusong Wangca8c1cb2020-05-06 13:49:19 -07003102 if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
3103 return false;
3104 }
3105 const auto& name = pool.hidlMemory().name();
3106 return name == "ashmem" || name == "mmap_fd";
3107 });
3108}
3109
3110bool compliantWithV1_2(const V1_3::Request& request) {
3111 return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
3112 if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory) {
3113 return false;
3114 }
3115 const auto& name = pool.hidlMemory().name();
3116 return name == "ashmem" || name == "mmap_fd" || name == "hardware_buffer_blob" ||
3117 name == "hardware_buffer";
Xusong Wang7ac6c9d2020-01-08 16:52:37 -08003118 });
3119}
3120
3121static hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
3122 switch (pool.getDiscriminator()) {
3123 case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
3124 return pool.hidlMemory();
3125 case V1_3::Request::MemoryPool::hidl_discriminator::token:
3126 return hidl_memory{};
3127 }
3128}
3129
3130static V1_3::Request::MemoryPool convertToV1_3(const hidl_memory& pool) {
3131 V1_3::Request::MemoryPool ret;
3132 ret.hidlMemory(pool);
3133 return ret;
3134}
3135
// Identity conversion: the request is already in the 1.0 schema.
V1_0::Request convertToV1_0(const V1_0::Request& request) {
    return request;
}
3139
Xusong Wangca8c1cb2020-05-06 13:49:19 -07003140static V1_0::Request uncheckedConvertToV1_0(const V1_3::Request& request) {
Xusong Wang7ac6c9d2020-01-08 16:52:37 -08003141 hidl_vec<hidl_memory> pools(request.pools.size());
3142 std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
3143 [](const auto& pool) { return convertToV1_0(pool); });
3144 return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
3145}
3146
// Downgrades a 1.3 request to the 1.0 schema. A non-compliant request (e.g.
// one using token pools or non-1.0 memory types) is still converted, lossily,
// after logging an error.
V1_0::Request convertToV1_0(const V1_3::Request& request) {
    if (!compliantWithV1_0(request)) {
        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
                   << " from V1_3::Request to V1_0::Request of version 1.0";
    }
    return uncheckedConvertToV1_0(request);
}
3154
// Downgrades a 1.3 request for use at version 1.2. The return type is
// V1_0::Request because HAL version 1.2 reuses the 1.0 request structure;
// only the compliance criteria differ (1.2 also accepts hardware buffers).
// A non-compliant request is still converted, lossily, after logging.
V1_0::Request convertToV1_2(const V1_3::Request& request) {
    if (!compliantWithV1_2(request)) {
        LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
                   << " from V1_3::Request to V1_0::Request of version 1.2";
    }
    return uncheckedConvertToV1_0(request);
}
3162
Xusong Wang7ac6c9d2020-01-08 16:52:37 -08003163V1_3::Request convertToV1_3(const V1_0::Request& request) {
3164 hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
3165 std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
3166 [](const auto& pool) { return convertToV1_3(pool); });
3167 return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
3168}
3169
// Identity conversion: the request is already in the 1.3 schema.
V1_3::Request convertToV1_3(const V1_3::Request& request) {
    return request;
}
3173
// Waits on a sync fence file descriptor and reports its state.
//
// fd:      sync fence file descriptor; a negative value is rejected as
//          invalid.
// timeout: timeout in milliseconds passed through to poll(); a negative
//          value blocks indefinitely, 0 polls without blocking.
//
// Returns SIGNALED when the fence has fired, ACTIVE on timeout, ERROR when
// the fence is in an error state, and UNKNOWN on invalid input or poll()
// failure. Like ::sync_wait(), errno is set on the non-SIGNALED paths.
FenceState syncWait(int fd, int timeout) {
    // This implementation is directly based on the ::sync_wait() implementation.

    struct pollfd fds;
    int ret;

    if (fd < 0) {
        errno = EINVAL;
        return FenceState::UNKNOWN;
    }

    fds.fd = fd;
    fds.events = POLLIN;

    do {
        ret = poll(&fds, 1, timeout);
        if (ret > 0) {
            // The fd is readable; inspect revents to classify the wakeup.
            if (fds.revents & POLLNVAL) {
                // fd is not open (or not pollable).
                errno = EINVAL;
                return FenceState::UNKNOWN;
            }
            if (fds.revents & POLLERR) {
                // The fence itself signaled an error condition.
                errno = EINVAL;
                return FenceState::ERROR;
            }
            return FenceState::SIGNALED;
        } else if (ret == 0) {
            // poll() timed out: the fence has not signaled yet.
            errno = ETIME;
            return FenceState::ACTIVE;
        }
        // ret == -1: retry only on transient interruptions.
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    return FenceState::UNKNOWN;
}
3208
David Gross0b9453e2017-09-22 17:16:51 -07003209#ifdef NN_DEBUGGABLE
David Grossa2a03632017-10-03 12:49:47 -07003210uint32_t getProp(const char* str, uint32_t defaultValue) {
Miao Wang820215d2017-10-04 19:45:45 -07003211 const std::string propStr = android::base::GetProperty(str, "");
3212 if (propStr.size() > 0) {
3213 return std::stoi(propStr);
David Grossa2a03632017-10-03 12:49:47 -07003214 } else {
3215 return defaultValue;
3216 }
David Gross0b9453e2017-09-22 17:16:51 -07003217}
David Gross0b9453e2017-09-22 17:16:51 -07003218#endif // NN_DEBUGGABLE
3219
Michael Butler43953b82019-07-22 18:59:46 -07003220} // namespace nn
3221} // namespace android