blob: e9fe46e0b2622e728b4651f52360c4cc59a250b9 [file] [log] [blame]
Jean-Luc Brouillet96775122017-07-12 01:37:27 -07001/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "Utils"
18
Slava Shklyaev3b1ea252018-11-06 15:32:44 +000019#include "Utils.h"
20
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000021#include <android-base/logging.h>
22#include <android-base/properties.h>
23#include <android-base/strings.h>
24#include <sys/system_properties.h>
Colin Crossbd7f9c42019-10-10 22:58:13 +000025
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000026#include <algorithm>
Colin Crossbd7f9c42019-10-10 22:58:13 +000027#include <limits>
28#include <set>
29#include <string>
30#include <tuple>
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000031#include <unordered_map>
Colin Crossbd7f9c42019-10-10 22:58:13 +000032#include <utility>
33#include <vector>
34
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +000035#include "ControlFlow.h"
Colin Crossbd7f9c42019-10-10 22:58:13 +000036#include "NeuralNetworks.h"
37#include "NeuralNetworksOEM.h"
38#include "OperationResolver.h"
39#include "ValidateHal.h"
Lev Proleev0ec8dcb2019-10-10 13:53:59 +000040
Jean-Luc Brouillet96775122017-07-12 01:37:27 -070041namespace android {
42namespace nn {
43
Michael Butlerd92f9742019-07-11 11:45:01 -070044using namespace hal;
45
// Performance reported when no measurement is available: maximal (worst-case)
// execution time and power usage.
constexpr PerformanceInfo kNoPerformanceInfo = {.execTime = FLT_MAX, .powerUsage = FLT_MAX};

// System property consulted by initVLogMask() to configure verbose logging.
const char kVLogPropKey[] = "debug.nn.vlog";
// Bitmask of enabled VLOG tags; all bits set (everything enabled) until
// initVLogMask() runs and narrows it per the system property.
int vLogMask = ~0;
50
51// Split the space separated list of tags from verbose log setting and build the
52// logging mask from it. note that '1' and 'all' are special cases to enable all
53// verbose logging.
54//
55// NN API verbose logging setting comes from system property debug.nn.vlog.
56// Example:
57// setprop debug.nn.vlog 1 : enable all logging tags.
58// setprop debug.nn.vlog "model compilation" : only enable logging for MODEL and
59// COMPILATION tags.
60void initVLogMask() {
61 vLogMask = 0;
62 const std::string vLogSetting = android::base::GetProperty(kVLogPropKey, "");
63 if (vLogSetting.empty()) {
64 return;
65 }
66
Michael Butler43953b82019-07-22 18:59:46 -070067 std::unordered_map<std::string, int> vLogFlags = {{"1", -1},
68 {"all", -1},
69 {"model", MODEL},
70 {"compilation", COMPILATION},
71 {"execution", EXECUTION},
72 {"cpuexe", CPUEXE},
73 {"manager", MANAGER},
Xusong Wang5d0e5552019-11-27 12:52:28 -080074 {"driver", DRIVER},
75 {"memory", MEMORY}};
Miao Wang820215d2017-10-04 19:45:45 -070076
David Grossd55ec4a2018-03-27 15:48:03 -070077 std::vector<std::string> elements = android::base::Split(vLogSetting, " ,:");
Miao Wang820215d2017-10-04 19:45:45 -070078 for (const auto& elem : elements) {
79 const auto& flag = vLogFlags.find(elem);
80 if (flag == vLogFlags.end()) {
81 LOG(ERROR) << "Unknown trace flag: " << elem;
82 continue;
83 }
84
85 if (flag->second == -1) {
86 // -1 is used for the special values "1" and "all" that enable all
87 // tracing.
88 vLogMask = ~0;
89 return;
90 } else {
91 vLogMask |= 1 << flag->second;
92 }
93 }
94}
95
// Returns the number of nanoseconds between the steady clock's epoch and
// "time".
static uint64_t getNanosecondsSinceEpoch(const std::chrono::steady_clock::time_point& time) {
    using std::chrono::duration_cast;
    using std::chrono::nanoseconds;
    return duration_cast<nanoseconds>(time.time_since_epoch()).count();
}
Michael Butler83e406e2019-12-16 18:32:45 -0800100
Michael Butler7002a0a2020-02-17 20:38:13 -0800101uint64_t getCurrentNanosecondsSinceEpoch() {
102 return getNanosecondsSinceEpoch(std::chrono::steady_clock::now());
103}
104
// Computes the deadline that lies "duration" nanoseconds in the future.
//
// Returns {ANEURALNETWORKS_BAD_DATA, empty} if now + duration would overflow
// the steady_clock's representable range; otherwise returns
// {ANEURALNETWORKS_NO_ERROR, deadline}.
static std::pair<int, OptionalTimePoint> makeTimePoint(uint64_t duration) {
    // Relevant time points.
    const uint64_t maxNanosecondsSinceEpoch =
            getNanosecondsSinceEpoch(std::chrono::steady_clock::time_point::max());
    const uint64_t currentNanosecondsSinceEpoch = getCurrentNanosecondsSinceEpoch();

    // Check for overflow: the subtraction is safe because max >= current.
    if (duration > maxNanosecondsSinceEpoch - currentNanosecondsSinceEpoch) {
        LOG(ERROR) << "Launching execution failed due to time point overflow";
        return {ANEURALNETWORKS_BAD_DATA, {}};
    }

    // Load and return OptionalTimePoint.
    OptionalTimePoint otp;
    otp.nanosecondsSinceEpoch(currentNanosecondsSinceEpoch + duration);
    return {ANEURALNETWORKS_NO_ERROR, otp};
}
122
123std::pair<int, OptionalTimePoint> makeTimePoint(std::optional<uint64_t> duration) {
124 const std::pair<int, OptionalTimePoint> empty = {ANEURALNETWORKS_NO_ERROR, {}};
125 return duration.has_value() ? makeTimePoint(*duration) : empty;
126}
127
Slava Shklyaevbecd62c2019-01-23 16:09:51 +0000128static bool isExtensionOperandType(int32_t type) {
129 return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperandTypeRange::BASE_MAX);
130}
131
132static bool isExtensionOperationType(ANeuralNetworksOperationType type) {
133 return static_cast<uint32_t>(type) > static_cast<uint32_t>(OperationTypeRange::BASE_MAX);
134}
135
// Returns true if "type" is an extension operand type (its code lies above
// OperandTypeRange::BASE_MAX). Delegates to the int32_t overload.
bool isExtensionOperandType(OperandType type) {
    return isExtensionOperandType(static_cast<int32_t>(type));
}
139
// Returns true if "type" is an extension operation type (its code lies above
// OperationTypeRange::BASE_MAX). Delegates to the int32_t overload.
bool isExtensionOperationType(OperationType type) {
    return isExtensionOperationType(static_cast<int32_t>(type));
}
143
David Gross5e7827e2017-09-13 12:31:13 -0700144namespace {
145
146template <typename EntryType, uint32_t entryCount, uint32_t entryCountOEM>
147EntryType tableLookup(const EntryType (&table)[entryCount],
Michael Butler43953b82019-07-22 18:59:46 -0700148 const EntryType (&tableOEM)[entryCountOEM], uint32_t code) {
David Gross5e7827e2017-09-13 12:31:13 -0700149 if (code < entryCount) {
150 return table[code];
151 } else if (code >= kOEMCodeBase && (code - kOEMCodeBase) < entryCountOEM) {
152 return tableOEM[code - kOEMCodeBase];
153 } else {
154 nnAssert(!"tableLookup: bad code");
155 return EntryType();
156 }
157}
158
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000159class OperationValidationContext : public IOperationValidationContext {
160 DISALLOW_IMPLICIT_CONSTRUCTORS(OperationValidationContext);
161
162 public:
Slava Shklyaev432890e2019-09-30 16:04:43 +0100163 OperationValidationContext(const char* operationName, uint32_t inputCount,
164 const uint32_t* inputIndexes, uint32_t outputCount,
165 const uint32_t* outputIndexes, const Operand* operands,
166 HalVersion halVersion)
167 : operationName(operationName),
168 inputCount(inputCount),
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000169 inputIndexes(inputIndexes),
170 outputCount(outputCount),
171 outputIndexes(outputIndexes),
172 operands(operands),
173 halVersion(halVersion) {}
174
Slava Shklyaev432890e2019-09-30 16:04:43 +0100175 const char* getOperationName() const override;
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000176 HalVersion getHalVersion() const override;
177
178 uint32_t getNumInputs() const override;
179 OperandType getInputType(uint32_t index) const override;
180 Shape getInputShape(uint32_t index) const override;
Michael Butler2d3826e2020-02-04 16:08:11 -0800181 const OperandExtraParams getInputExtraParams(uint32_t index) const override;
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000182
183 uint32_t getNumOutputs() const override;
184 OperandType getOutputType(uint32_t index) const override;
185 Shape getOutputShape(uint32_t index) const override;
186
187 private:
188 const Operand* getInputOperand(uint32_t index) const;
189 const Operand* getOutputOperand(uint32_t index) const;
190
Slava Shklyaev432890e2019-09-30 16:04:43 +0100191 const char* operationName;
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000192 uint32_t inputCount;
193 const uint32_t* inputIndexes;
194 uint32_t outputCount;
195 const uint32_t* outputIndexes;
196 const Operand* operands;
197 HalVersion halVersion;
198};
199
Slava Shklyaev432890e2019-09-30 16:04:43 +0100200const char* OperationValidationContext::getOperationName() const {
201 return operationName;
202}
203
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000204HalVersion OperationValidationContext::getHalVersion() const {
205 return halVersion;
206}
207
208const Operand* OperationValidationContext::getInputOperand(uint32_t index) const {
209 CHECK(index < static_cast<uint32_t>(inputCount));
210 return &operands[inputIndexes[index]];
211}
212
213const Operand* OperationValidationContext::getOutputOperand(uint32_t index) const {
214 CHECK(index < static_cast<uint32_t>(outputCount));
215 return &operands[outputIndexes[index]];
216}
217
218uint32_t OperationValidationContext::getNumInputs() const {
219 return inputCount;
220}
221
222uint32_t OperationValidationContext::getNumOutputs() const {
223 return outputCount;
224}
225
226OperandType OperationValidationContext::getInputType(uint32_t index) const {
227 return getInputOperand(index)->type;
228}
229
230Shape OperationValidationContext::getInputShape(uint32_t index) const {
231 const Operand* operand = getInputOperand(index);
Slava Shklyaevf50f2232019-01-22 14:23:55 +0000232 return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
233 operand->extraParams};
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000234}
235
Michael Butler2d3826e2020-02-04 16:08:11 -0800236const OperandExtraParams OperationValidationContext::getInputExtraParams(uint32_t index) const {
Xusong Wang4d7410e2019-03-13 15:20:16 -0700237 return getInputOperand(index)->extraParams;
238}
239
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000240OperandType OperationValidationContext::getOutputType(uint32_t index) const {
241 return getOutputOperand(index)->type;
242}
243
244Shape OperationValidationContext::getOutputShape(uint32_t index) const {
245 const Operand* operand = getOutputOperand(index);
Slava Shklyaevf50f2232019-01-22 14:23:55 +0000246 return {operand->type, operand->dimensions, operand->scale, operand->zeroPoint,
247 operand->extraParams};
Slava Shklyaev3b1ea252018-11-06 15:32:44 +0000248}
249
David Gross5e7827e2017-09-13 12:31:13 -0700250}; // anonymous namespace
251
Jean-Luc Brouilletc5e342b2017-10-11 22:28:55 -0700252#define COUNT(X) (sizeof(X) / sizeof(X[0]))
253
// Returns a human-readable name for the given operand type.
std::string getOperandTypeName(OperandType type) {
    return toString(type);
}
257
// Returns a human-readable name for the given numeric operation code.
// (Resolves to the OperationType overload below, not to itself.)
static std::string getOperationName(uint32_t code) {
    return getOperationName(static_cast<OperationType>(code));
}
261
// Returns a human-readable name for the given operation type.
std::string getOperationName(OperationType type) {
    return toString(type);
}
265
// Per-element size, in bytes, of each base data type, indexed by the
// ANEURALNETWORKS_* type code. A size of 0 marks types whose operands carry
// no data of their own (e.g. MODEL).
const uint32_t kSizeOfDataType[]{
        4,  // ANEURALNETWORKS_FLOAT32
        4,  // ANEURALNETWORKS_INT32
        4,  // ANEURALNETWORKS_UINT32
        4,  // ANEURALNETWORKS_TENSOR_FLOAT32
        4,  // ANEURALNETWORKS_TENSOR_INT32
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        1,  // ANEURALNETWORKS_BOOL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        2,  // ANEURALNETWORKS_TENSOR_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_BOOL8
        2,  // ANEURALNETWORKS_FLOAT16
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        2,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        1,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        0,  // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kSizeOfDataType) == kNumberOfDataTypes, "kSizeOfDataType is incorrect");

// True for scalar types, false for tensor types; same indexing as
// kSizeOfDataType above.
const bool kScalarDataType[]{
        true,   // ANEURALNETWORKS_FLOAT32
        true,   // ANEURALNETWORKS_INT32
        true,   // ANEURALNETWORKS_UINT32
        false,  // ANEURALNETWORKS_TENSOR_FLOAT32
        false,  // ANEURALNETWORKS_TENSOR_INT32
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
        true,   // ANEURALNETWORKS_BOOL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_SYMM
        false,  // ANEURALNETWORKS_TENSOR_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_BOOL8
        true,   // ANEURALNETWORKS_FLOAT16
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL
        false,  // ANEURALNETWORKS_TENSOR_QUANT16_ASYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_SYMM
        false,  // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED
        true,   // ANEURALNETWORKS_MODEL
};

static_assert(COUNT(kScalarDataType) == kNumberOfDataTypes, "kScalarDataType is incorrect");

// Parallel tables for the OEM type codes, which occupy a separate code range
// starting at kOEMCodeBase (see tableLookup).
const uint32_t kSizeOfDataTypeOEM[]{
        0,  // ANEURALNETWORKS_OEM
        1,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kSizeOfDataTypeOEM) == kNumberOfDataTypesOEM,
              "kSizeOfDataTypeOEM is incorrect");

const bool kScalarDataTypeOEM[]{
        true,   // ANEURALNETWORKS_OEM
        false,  // ANEURALNETWORKS_TENSOR_OEM_BYTE
};

static_assert(COUNT(kScalarDataTypeOEM) == kNumberOfDataTypesOEM,
              "kScalarDataTypeOEM is incorrect");
323
// Returns whether the given non-extension operand type is a scalar type (as
// opposed to a tensor type). Aborts if given an extension type.
bool nonExtensionOperandTypeIsScalar(int type) {
    CHECK(!isExtensionOperandType(type)) << "Extension operand types are not supported";
    return tableLookup(kScalarDataType, kScalarDataTypeOEM, type);
}
328
// Returns the amount of memory, in bytes, needed to store an operand of the
// given non-extension type and dimensions: the element size for scalars, or
// element size times the product of the dimensions for tensors. Returns 0
// for a tensor with an empty dimension list (size unknown). Aborts if given
// an extension type.
//
// NOTE(review): the product of the dimensions is accumulated in a uint32_t
// and can wrap for very large tensors — confirm callers bound operand sizes.
uint32_t nonExtensionOperandSizeOfData(OperandType type, const std::vector<uint32_t>& dimensions) {
    CHECK(!isExtensionOperandType(type)) << "Size of extension operand data is unknown";
    int n = static_cast<int>(type);

    uint32_t size = tableLookup(kSizeOfDataType, kSizeOfDataTypeOEM, n);

    // Scalars ignore the dimension list.
    if (tableLookup(kScalarDataType, kScalarDataTypeOEM, n) == true) {
        return size;
    }

    // A tensor with no dimension information has unknown size.
    if (dimensions.empty()) {
        return 0;
    }

    for (auto d : dimensions) {
        size *= d;
    }
    return size;
}
348
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000349bool tensorHasUnspecifiedDimensions(int type, const uint32_t* dim, uint32_t dimCount) {
350 if (!isExtensionOperandType(type)) {
351 CHECK(!nonExtensionOperandTypeIsScalar(type))
352 << "A scalar type can never have unspecified dimensions";
Slava Shklyaeva1516352019-02-06 16:35:49 +0000353 }
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000354 return dimCount == 0 || std::find(dim, dim + dimCount, 0) != (dim + dimCount);
David Gross1ec15052018-06-01 11:01:12 -0700355}
356
// Convenience overload for an NDK operand type descriptor.
bool tensorHasUnspecifiedDimensions(const ANeuralNetworksOperandType* type) {
    return tensorHasUnspecifiedDimensions(type->type, type->dimensions, type->dimensionCount);
}
360
// Convenience overload for a HAL operand.
bool tensorHasUnspecifiedDimensions(const Operand& operand) {
    return tensorHasUnspecifiedDimensions(static_cast<int>(operand.type), operand.dimensions.data(),
                                          operand.dimensions.size());
}
365
// Returns how many padding bytes must be inserted at offset "index" so that a
// field of "length" bytes is suitably aligned: 4-byte alignment for lengths
// of 4 or more, 2-byte alignment for lengths of 2 or 3, none otherwise.
uint32_t alignBytesNeeded(uint32_t index, size_t length) {
    uint32_t pattern;
    if (length >= 4) {
        pattern = 3;  // Align on 4-byte boundary
    } else if (length >= 2) {
        pattern = 1;  // Align on 2-byte boundary
    } else {
        pattern = 0;  // No alignment necessary
    }
    // (~index + 1) is the two's-complement negation of index, so this is
    // (-index) mod (pattern + 1): the distance up to the next boundary.
    return (~index + 1) & pattern;
}
378
Michael Butler75886e72018-01-23 11:05:43 -0800379void logModelToInfo(const V1_0::Model& model) {
380 LOG(INFO) << "V1_0::Model start";
381 LOG(INFO) << "operands" << toString(model.operands);
382 LOG(INFO) << "operations" << toString(model.operations);
383 LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
384 LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
385 LOG(INFO) << "operandValues size" << model.operandValues.size();
Miao Wang64031fa2018-04-10 15:20:53 -0700386 LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
Michael Butler75886e72018-01-23 11:05:43 -0800387}
388
// Dumps a V1_1 model to the INFO log, one field per line. Pool contents are
// only shown in builds where SHOW_IF_DEBUG expands to its argument.
void logModelToInfo(const V1_1::Model& model) {
    LOG(INFO) << "V1_1::Model start";
    LOG(INFO) << "operands" << toString(model.operands);
    LOG(INFO) << "operations" << toString(model.operations);
    LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
    LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
    LOG(INFO) << "operandValues size " << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
}
398
Colin Crossbd7f9c42019-10-10 22:58:13 +0000399void logModelToInfo(const V1_2::Model& model) {
400 LOG(INFO) << "V1_2::Model start";
401 LOG(INFO) << "operands" << toString(model.operands);
402 LOG(INFO) << "operations" << toString(model.operations);
403 LOG(INFO) << "inputIndexes" << toString(model.inputIndexes);
404 LOG(INFO) << "outputIndexes" << toString(model.outputIndexes);
405 LOG(INFO) << "operandValues size" << model.operandValues.size();
406 LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
407 LOG(INFO) << "relaxComputationFloat32toFloat16" << model.relaxComputationFloat32toFloat16;
408 LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
409}
410
Slava Shklyaev8de7a222019-12-13 18:05:41 +0000411static void logSubgraphToInfo(std::string label, const V1_3::Subgraph& subgraph) {
412 LOG(INFO) << label << ".operands" << toString(subgraph.operands);
413 LOG(INFO) << label << ".operations" << toString(subgraph.operations);
414 LOG(INFO) << label << ".inputIndexes" << toString(subgraph.inputIndexes);
415 LOG(INFO) << label << ".outputIndexes" << toString(subgraph.outputIndexes);
416}
417
// Dumps a V1_3 model to the INFO log: the main subgraph, each referenced
// subgraph, and then the model-level fields. Pool contents are only shown in
// builds where SHOW_IF_DEBUG expands to its argument.
void logModelToInfo(const V1_3::Model& model) {
    LOG(INFO) << "V1_3::Model start";
    logSubgraphToInfo("main", model.main);
    for (uint32_t i = 0, n = model.referenced.size(); i < n; ++i) {
        logSubgraphToInfo("referenced[" + std::to_string(i) + "]", model.referenced[i]);
    }
    LOG(INFO) << "operandValues size " << model.operandValues.size();
    LOG(INFO) << "pools" << SHOW_IF_DEBUG(toString(model.pools));
    LOG(INFO) << "relaxComputationFloat32toFloat16 " << model.relaxComputationFloat32toFloat16;
    LOG(INFO) << "extensionNameToPrefix" << toString(model.extensionNameToPrefix);
}
429
// Validates NDK per-channel quantization parameters against a HAL operand of
// type TENSOR_QUANT8_SYMM_PER_CHANNEL: the channel dimension must be in
// range and specified (nonzero), the scale array must be present with one
// positive scale per channel. Returns false otherwise.
bool validateOperandSymmPerChannelQuantParams(
        const Operand& halOperand, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant,
        const char* tag) {
    // Note: silently returns false (no log) when the operand type itself is
    // not per-channel quantized.
    if (halOperand.type != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        return false;
    }

    NN_RET_CHECK_LT(channelQuant.channelDim, halOperand.dimensions.size()) << tag;
    NN_RET_CHECK(channelQuant.scales != nullptr) << tag;
    NN_RET_CHECK_EQ(channelQuant.scaleCount, halOperand.dimensions[channelQuant.channelDim]) << tag;
    NN_RET_CHECK_NE(halOperand.dimensions[channelQuant.channelDim], 0u)
            << tag << " channel dimension " << channelQuant.channelDim << " is underspecified";
    for (uint32_t i = 0; i < halOperand.dimensions[channelQuant.channelDim]; i++) {
        NN_RET_CHECK_GT(channelQuant.scales[i], 0.0f) << tag << " invalid scaleArray[" << i << "]";
    }
    return true;
}
447
// Checks that a scalar operand type carries no dimension information.
static bool validateScalarDimensions(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.dimensionCount, 0u) << tag << " invalid dimensions for scalar type";
    NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";
    return true;
}
453
// Checks TENSOR_QUANT8_ASYMM parameters: zeroPoint in [0, 255], scale > 0.
static bool validateQuant8AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
460
// Checks TENSOR_QUANT8_ASYMM_SIGNED parameters: zeroPoint in [-128, 127],
// scale > 0.
static bool validateQuant8AsymmSignedParams(const ANeuralNetworksOperandType& type,
                                            const char* tag) {
    NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
468
// Checks TENSOR_QUANT8_SYMM parameters: zeroPoint must be 0, scale > 0.
static bool validateQuant8SymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
474
// Checks TENSOR_QUANT16_ASYMM parameters: zeroPoint in [0, 65535], scale > 0.
static bool validateQuant16AsymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)
            << tag << " invalid zeroPoint: " << type.zeroPoint;
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
481
// Checks symmetric quantization parameters (e.g. TENSOR_QUANT16_SYMM):
// zeroPoint must be 0, scale > 0.
static bool validateQuantSymmParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_GT(type.scale, 0.f) << tag << " invalid scale";
    return true;
}
487
// Checks that a non-quantized type carries no quantization parameters:
// zeroPoint and scale must both be zero.
static bool validateNoQuantParams(const ANeuralNetworksOperandType& type, const char* tag) {
    NN_RET_CHECK_EQ(type.zeroPoint, 0) << tag << " zeroPoint is not zero";
    NN_RET_CHECK_EQ(type.scale, 0.f) << tag << " scale is not zero";
    return true;
}
493
// Checks that a tensor type has fully-specified dimensions: a nonzero
// dimension count with no zero ("unknown") dimension. When allowPartial is
// true, any dimension information is accepted.
static bool validateTensorDimensions(const ANeuralNetworksOperandType& type, const char* tag,
                                     bool allowPartial) {
    if (allowPartial) {
        return true;
    }
    NN_RET_CHECK_GT(type.dimensionCount, 0u) << tag << " invalid operand dimensions";
    for (uint32_t i = 0; i < type.dimensionCount; i++) {
        NN_RET_CHECK_NE(type.dimensions[i], 0u) << tag << " invalid operand dimensions";
    }
    return true;
}
505
// Validates an NDK operand type descriptor.
//
// For extension types, extensionOperandTypeInfo must be provided and decides
// whether the operand is a tensor or a scalar; extension operands may not
// carry quantization parameters. For base types, extensionOperandTypeInfo
// must be null, the type code must be a known base or OEM code, and the
// quantization parameters must match the rules for that specific type.
// "allowPartial" permits tensors with unspecified dimensions.
static bool validateOperandTypeHelper(
        const ANeuralNetworksOperandType& type,
        const Extension::OperandTypeInformation* const extensionOperandTypeInfo, const char* tag,
        bool allowPartial) {
    // dimensionCount and dimensions must agree on whether dimensions exist.
    NN_RET_CHECK_EQ(type.dimensionCount == 0, type.dimensions == nullptr);
    if (isExtensionOperandType(type.type)) {
        NN_RET_CHECK(extensionOperandTypeInfo != nullptr);
        if (extensionOperandTypeInfo->isTensor) {
            NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        } else {
            NN_RET_CHECK(validateScalarDimensions(type, tag));
        }
        return validateNoQuantParams(type, tag);
    }

    NN_RET_CHECK(extensionOperandTypeInfo == nullptr);
    NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))
            << tag << " invalid OperandType: " << type.type;

    bool isScalar = tableLookup(kScalarDataType, kScalarDataTypeOEM, type.type);
    if (isScalar) {
        NN_RET_CHECK(validateScalarDimensions(type, tag));
        if (type.type != ANEURALNETWORKS_OEM_SCALAR) {  // Historically, we have allowed OEM types
                                                        // to use quantization parameters.
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    } else {
        NN_RET_CHECK(validateTensorDimensions(type, tag, allowPartial));
        // Dispatch to the quantization-parameter rules for this tensor type.
        if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
            NN_RET_CHECK(validateQuant8AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
            NN_RET_CHECK(validateQuant8AsymmSignedParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM) {
            NN_RET_CHECK(validateQuant8SymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM) {
            NN_RET_CHECK(validateQuant16AsymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
            NN_RET_CHECK(validateQuantSymmParams(type, tag));
        } else if (type.type == ANEURALNETWORKS_TENSOR_INT32) {
            // TODO(b/119869082): TENSOR_INT32 should not use quantization parameters.
        } else if (type.type == ANEURALNETWORKS_TENSOR_OEM_BYTE) {
            // Historically, we have allowed OEM types to use quantization parameters.
        } else {
            NN_RET_CHECK(validateNoQuantParams(type, tag));
        }
    }

    return true;
}
555
Slava Shklyaev48488fc2019-02-11 18:26:29 +0000556int validateOperandType(const ANeuralNetworksOperandType& type,
557 const Extension::OperandTypeInformation* const extensionOperandTypeInfo,
558 const char* tag, bool allowPartial) {
559 return validateOperandTypeHelper(type, extensionOperandTypeInfo, tag, allowPartial)
560 ? ANEURALNETWORKS_NO_ERROR
561 : ANEURALNETWORKS_BAD_DATA;
Jean-Luc Brouillete127e492017-09-27 23:59:20 -0700562}
563
564int validateOperandList(uint32_t count, const uint32_t* list, uint32_t operandCount,
565 const char* tag) {
566 for (uint32_t i = 0; i < count; i++) {
567 if (list[i] >= operandCount) {
568 LOG(ERROR) << tag << " invalid operand index at " << i << " = " << list[i]
569 << ", operandCount " << operandCount;
570 return ANEURALNETWORKS_BAD_DATA;
571 }
572 }
573 return ANEURALNETWORKS_NO_ERROR;
574}
575
Michael Butler43953b82019-07-22 18:59:46 -0700576int validateOperationOperandTypes(const std::vector<Operand>& operands, uint32_t inOperandCount,
577 const uint32_t* inOperandIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800578 const std::vector<OperandType>& inExpectedTypes,
579 uint32_t outOperandCount, const uint32_t* outOperandIndexes,
580 const std::vector<OperandType>& outExpectedInTypes) {
Slava Shklyaevc2b17632018-12-04 14:17:02 +0000581 if (inOperandCount != static_cast<uint32_t>(inExpectedTypes.size()) ||
582 outOperandCount != static_cast<uint32_t>(outExpectedInTypes.size())) {
Slava Shklyaev9616a672018-10-29 18:25:11 +0000583 LOG(ERROR) << "Wrong operand count: expected " << inExpectedTypes.size() << " inputs and "
584 << outExpectedInTypes.size() << " outputs,"
585 << "got " << inOperandCount << " inputs and " << outOperandCount << " outputs";
Miao Wang137d2782018-03-06 15:03:14 -0800586 return ANEURALNETWORKS_BAD_DATA;
587 }
588 for (uint32_t i = 0; i < inOperandCount; i++) {
589 if (operands[inOperandIndexes[i]].type != inExpectedTypes[i]) {
590 LOG(ERROR) << "Invalid input tensor type "
Michael Butler43953b82019-07-22 18:59:46 -0700591 << toString(operands[inOperandIndexes[i]].type) << " for input " << i
592 << ", expected " << toString(inExpectedTypes[i]);
Miao Wang137d2782018-03-06 15:03:14 -0800593 return ANEURALNETWORKS_BAD_DATA;
594 }
595 }
596 for (uint32_t i = 0; i < outOperandCount; i++) {
597 if (operands[outOperandIndexes[i]].type != outExpectedInTypes[i]) {
598 LOG(ERROR) << "Invalid output tensor type "
Michael Butler43953b82019-07-22 18:59:46 -0700599 << toString(operands[outOperandIndexes[i]].type) << " for input " << i
600 << ", expected " << toString(outExpectedInTypes[i]);
Miao Wang137d2782018-03-06 15:03:14 -0800601 return ANEURALNETWORKS_BAD_DATA;
602 }
603 }
604
605 return ANEURALNETWORKS_NO_ERROR;
606}
607
// Checks that the HAL version being validated against is at least the minimum
// version required by the given operation signature. Logs and returns
// ANEURALNETWORKS_BAD_DATA if "halVersion" is too old.
static int validateHalVersion(ANeuralNetworksOperationType opType, HalVersion halVersion,
                              HalVersion minSupportedHalVersion) {
    if (halVersion < minSupportedHalVersion) {
        LOG(ERROR) << "The given inputs and outputs for operation " << getOperationName(opType)
                   << " are only supported in " << toString(minSupportedHalVersion)
                   << " and later (validating using " << toString(halVersion) << ")";
        return ANEURALNETWORKS_BAD_DATA;
    }
    return ANEURALNETWORKS_NO_ERROR;
}
618
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +0000619// Checks if two operands have the same types, shapes, and parameters.
620// Omits lifetime, numberOfConsumers, and location.
621static bool compatible(const Operand& a, const Operand& b) {
622 NN_RET_CHECK(a.type == b.type) << toString(a.type) << " != " << toString(b.type);
623 NN_RET_CHECK(a.dimensions == b.dimensions)
624 << toString(a.dimensions) << " != " << toString(b.dimensions);
625 NN_RET_CHECK_EQ(a.scale, b.scale);
626 NN_RET_CHECK_EQ(a.zeroPoint, b.zeroPoint);
627 NN_RET_CHECK(a.extraParams == b.extraParams)
628 << toString(a.extraParams) << " != " << toString(b.extraParams);
629 return true;
630}
631
// Checks that the operand is a valid control-flow condition: a TENSOR_BOOL8
// singleton, i.e. rank 1 with a single element. Returns false (after logging
// via NN_RET_CHECK) on any violation.
static bool validateConditionOperand(const Operand& operand) {
    NN_RET_CHECK(operand.type == OperandType::TENSOR_BOOL8)
            << "Unexpected condition operand type: " << toString(operand.type);
    NN_RET_CHECK_EQ(operand.dimensions.size(), 1u) << "Condition operand must be a singleton";
    NN_RET_CHECK_EQ(operand.dimensions[0], 1u) << "Condition operand must be a singleton";
    return true;
}
639
// Asserts that every callback of the SubgraphValidationHelper is populated.
// These CHECKs abort the process on failure: a null callback here is a
// programming error in the caller, not bad user data.
static void checkSubgraphValidationHelper(const SubgraphValidationHelper& helper) {
    CHECK(helper.isValidSubgraphReference != nullptr);
    CHECK(helper.getSubgraphInputCount != nullptr);
    CHECK(helper.getSubgraphOutputCount != nullptr);
    CHECK(helper.getSubgraphInputOperand != nullptr);
    CHECK(helper.getSubgraphOutputOperand != nullptr);
}
647
// Validates an ANEURALNETWORKS_IF operation.
//
// Expected input layout (indices are the operation_if::k* constants): the
// condition boolean operand, the "then" subgraph reference, the "else"
// subgraph reference, followed by the operands passed to the selected branch.
// Both branch subgraphs must consume exactly the trailing inputs and produce
// exactly the operation's outputs, with each inner operand compatible() with
// the corresponding outer operand.
//
// Returns false (with NN_RET_CHECK logging the first failure) on any
// violation; out-of-range operand indexes must have been rejected by the
// caller before this runs.
static bool validateIfOperation(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
                                const uint32_t* outputs, const std::vector<Operand>& operands,
                                const SubgraphValidationHelper& helper) {
    namespace op = operation_if;
    checkSubgraphValidationHelper(helper);
    // Minimum 3 inputs: condition + then model + else model.
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_IF must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_IF must have at least 1 output";
    // Shared validation for the "then" and "else" subgraph references.
    auto validateBranchOperand = [&](const Operand& branchModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(branchModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t branchModelInputCount = helper.getSubgraphInputCount(branchModelOperand);
        const uint32_t branchModelOutputCount = helper.getSubgraphOutputCount(branchModelOperand);
        // The branch consumes every IF input after the three fixed ones...
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + branchModelInputCount);
        // ...and produces exactly the IF operation's outputs.
        NN_RET_CHECK_EQ(outputCount, branchModelOutputCount);
        for (uint32_t i = 0; i < branchModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        for (uint32_t i = 0; i < branchModelOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(branchModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateConditionOperand(operands[inputs[op::kCondBoolOperand]]))
            << "Validation failed for IF condition operand";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kThenModelOperand]]))
            << "Validation failed for IF then model";
    NN_RET_CHECK(validateBranchOperand(operands[inputs[op::kElseModelOperand]]))
            << "Validation failed for IF else model";
    return true;
}
682
// Validates an ANEURALNETWORKS_WHILE operation.
//
// Returns false (with NN_RET_CHECK logging the first failure) on any
// violation; out-of-range operand indexes must have been rejected by the
// caller before this runs.
static bool validateWhileOperation(uint32_t inputCount, const uint32_t* inputs,
                                   uint32_t outputCount, const uint32_t* outputs,
                                   const std::vector<Operand>& operands,
                                   const SubgraphValidationHelper& helper) {
    // Let the loop have
    // - m >= 1 input-output operands,
    // - k >= 0 state-only operands, and
    // - n >= 0 input-only operands.
    // Then
    // - the WHILE loop operation has (2 + m + k + n) inputs and m outputs.
    // - the condition model has (m + k + n) inputs and 1 output.
    // - the body model has (m + k + n) inputs and (m + k) outputs.
    namespace op = operation_while;
    checkSubgraphValidationHelper(helper);
    // Minimum 3 inputs: condition model + body model + at least one (m >= 1)
    // input-output operand.
    NN_RET_CHECK_GE(inputCount, 3u) << "ANEURALNETWORKS_WHILE must have at least 3 inputs";
    NN_RET_CHECK_GE(outputCount, 1u) << "ANEURALNETWORKS_WHILE must have at least 1 output";
    // The condition subgraph reads all (m + k + n) loop-carried operands and
    // yields a single boolean singleton.
    auto validateCondOperand = [&](const Operand& condModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(condModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t condModelInputCount = helper.getSubgraphInputCount(condModelOperand);
        const uint32_t condModelOutputCount = helper.getSubgraphOutputCount(condModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + condModelInputCount);
        NN_RET_CHECK_EQ(condModelOutputCount, 1u);
        for (uint32_t i = 0; i < condModelInputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(condModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        NN_RET_CHECK(
                validateConditionOperand(*helper.getSubgraphOutputOperand(condModelOperand, 0)));
        return true;
    };
    // The body subgraph reads all (m + k + n) operands and writes back the
    // (m + k) input-output and state-only operands for the next iteration.
    auto validateBodyOperand = [&](const Operand& bodyModelOperand) -> bool {
        NN_RET_CHECK(helper.isValidSubgraphReference(bodyModelOperand))
                << "Operand is not a valid subgraph reference";
        const uint32_t bodyModelInputCount = helper.getSubgraphInputCount(bodyModelOperand);
        const uint32_t bodyModelOutputCount = helper.getSubgraphOutputCount(bodyModelOperand);
        NN_RET_CHECK_EQ(inputCount, op::kFirstInput + bodyModelInputCount);
        // body outputs = m + k >= m = outputCount; body inputs = m + k + n >= m + k.
        NN_RET_CHECK_GE(bodyModelOutputCount, outputCount);
        NN_RET_CHECK_GE(bodyModelInputCount, bodyModelOutputCount);
        // Recover m, k, n from the operation and body signatures.
        const uint32_t inputOutputCount = outputCount;
        const uint32_t stateOnlyCount = bodyModelOutputCount - inputOutputCount;
        const uint32_t inputOnlyCount = bodyModelInputCount - bodyModelOutputCount;
        // All (m + k + n) body inputs match the WHILE operation's inputs.
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount + inputOnlyCount; i < n; ++i) {
            const Operand& innerOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[inputs[op::kFirstInput + i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        // The first m body outputs match the WHILE operation's outputs.
        for (uint32_t i = 0; i < inputOutputCount; ++i) {
            const Operand& innerOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            const Operand& outerOperand = operands[outputs[i]];
            NN_RET_CHECK(compatible(innerOperand, outerOperand));
        }
        // Each loop-carried operand (m + k) must round-trip: body output i
        // feeds body input i on the next iteration.
        for (uint32_t i = 0, n = inputOutputCount + stateOnlyCount; i < n; ++i) {
            const Operand& inputOperand = *helper.getSubgraphInputOperand(bodyModelOperand, i);
            const Operand& outputOperand = *helper.getSubgraphOutputOperand(bodyModelOperand, i);
            NN_RET_CHECK(compatible(inputOperand, outputOperand));
        }
        return true;
    };
    NN_RET_CHECK(validateCondOperand(operands[inputs[op::kCondModelOperand]]))
            << "Validation failed for WHILE condition model";
    NN_RET_CHECK(validateBodyOperand(operands[inputs[op::kBodyModelOperand]]))
            << "Validation failed for WHILE body model";
    return true;
}
749
750static inline int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
751 const uint32_t* inputIndexes, uint32_t outputCount,
752 const uint32_t* outputIndexes,
753 const std::vector<hal::Operand>& operands,
754 HalVersion halVersion) {
755 if (opType == ANEURALNETWORKS_IF || opType == ANEURALNETWORKS_WHILE) {
756 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
757 LOG(ERROR) << "This validateOperation() overload does not support control flow";
758 return ANEURALNETWORKS_BAD_DATA;
759 }
760 return validateOperation(opType, inputCount, inputIndexes, outputCount, outputIndexes, operands,
761 halVersion, {});
762}
763
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100764int validateOperation(ANeuralNetworksOperationType opType, uint32_t inputCount,
765 const uint32_t* inputIndexes, uint32_t outputCount,
766 const uint32_t* outputIndexes, const std::vector<Operand>& operands,
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +0000767 HalVersion halVersion, const SubgraphValidationHelper& helper) {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +0000768 NN_RETURN_IF_ERROR(validateOperandList(inputCount, inputIndexes,
769 static_cast<uint32_t>(operands.size()),
770 "ANeuralNetworksModel_addOperation inputs"));
771 NN_RETURN_IF_ERROR(validateOperandList(outputCount, outputIndexes,
772 static_cast<uint32_t>(operands.size()),
773 "ANeuralNetworksModel_addOperation outputs"));
774
775 if (isExtensionOperationType(opType)) {
776 if (halVersion < HalVersion::V1_2) {
777 LOG(ERROR)
778 << "Extension operations are supported since HAL version 1.2, validating using "
779 << toString(halVersion);
780 return ANEURALNETWORKS_BAD_DATA;
781 }
782 // There is no other validation we can do for an extension operation.
783 return ANEURALNETWORKS_NO_ERROR;
Miao Wang137d2782018-03-06 15:03:14 -0800784 }
785
786 auto logInvalidInOutNumber = [opType, inputCount, outputCount](int expIn, int expOut) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000787 LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected " << expIn
788 << ") or output operands (" << outputCount << ", expected " << expOut
789 << ") for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800790 };
791
792 switch (opType) {
793 case ANEURALNETWORKS_OEM_OPERATION: {
794 return ANEURALNETWORKS_NO_ERROR;
795 }
Miao Wang137d2782018-03-06 15:03:14 -0800796 case ANEURALNETWORKS_FLOOR: {
797 if (inputCount != 1 || outputCount != 1) {
798 logInvalidInOutNumber(1, 1);
799 return ANEURALNETWORKS_BAD_DATA;
800 }
801 auto inputType = operands[inputIndexes[0]].type;
802 std::vector<OperandType> inExpectedTypes;
803 std::vector<OperandType> outExpectedTypes;
804 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sandersab24a3a2018-11-21 15:07:12 +0000805 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Miao Wang137d2782018-03-06 15:03:14 -0800806 inExpectedTypes = {OperandType::TENSOR_FLOAT32};
807 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sandersab24a3a2018-11-21 15:07:12 +0000808 } else if (inputType == OperandType::TENSOR_FLOAT16) {
809 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
810 inExpectedTypes = {OperandType::TENSOR_FLOAT16};
811 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800812 } else {
813 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000814 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800815 return ANEURALNETWORKS_BAD_DATA;
816 }
Michael Butler43953b82019-07-22 18:59:46 -0700817 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
818 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800819 outExpectedTypes);
820 }
Miao Wang137d2782018-03-06 15:03:14 -0800821 case ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION: {
Xusong Wang4e955462018-10-18 16:46:44 -0700822 if ((inputCount != 6 && inputCount != 5) || outputCount != 1) {
823 LOG(ERROR) << "Invalid number of input operands (" << inputCount
824 << ", expected 6 or 5) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000825 << ", expected 1) for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800826 return ANEURALNETWORKS_BAD_DATA;
827 }
828 auto inputType = operands[inputIndexes[0]].type;
829 std::vector<OperandType> inExpectedTypes;
830 std::vector<OperandType> outExpectedTypes;
831 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sandersd09b4402018-11-27 09:45:00 +0000832 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
833 inExpectedTypes = {
834 OperandType::TENSOR_FLOAT32, OperandType::INT32, OperandType::FLOAT32,
835 OperandType::FLOAT32, OperandType::FLOAT32,
836 };
Miao Wang137d2782018-03-06 15:03:14 -0800837 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sandersd09b4402018-11-27 09:45:00 +0000838 } else if (inputType == OperandType::TENSOR_FLOAT16) {
839 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
840 inExpectedTypes = {
Xusong Wange2225d02018-12-05 15:35:40 -0800841 OperandType::TENSOR_FLOAT16, OperandType::INT32, OperandType::FLOAT16,
842 OperandType::FLOAT16, OperandType::FLOAT16,
Michael K. Sandersd09b4402018-11-27 09:45:00 +0000843 };
844 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800845 } else {
846 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000847 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800848 return ANEURALNETWORKS_BAD_DATA;
849 }
Xusong Wang4e955462018-10-18 16:46:44 -0700850 if (inputCount == 6) {
851 inExpectedTypes.push_back(OperandType::INT32);
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000852 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Michael K. Sandersd09b4402018-11-27 09:45:00 +0000853 } else if (operands[inputIndexes[0]].dimensions.size() != 4) {
854 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Xusong Wang4e955462018-10-18 16:46:44 -0700855 }
Michael Butler43953b82019-07-22 18:59:46 -0700856 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
857 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800858 outExpectedTypes);
859 }
860 case ANEURALNETWORKS_RESHAPE: {
861 if (inputCount != 2 || outputCount != 1) {
862 logInvalidInOutNumber(2, 1);
863 return ANEURALNETWORKS_BAD_DATA;
864 }
865 auto inputType = operands[inputIndexes[0]].type;
866 std::vector<OperandType> inExpectedTypes;
867 std::vector<OperandType> outExpectedTypes;
868 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000869 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700870 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800871 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000872 } else if (inputType == OperandType::TENSOR_FLOAT16) {
873 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
874 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_INT32};
875 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800876 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000877 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700878 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800879 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Lev Proleev5db57cf2019-11-25 15:11:31 +0000880 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
881 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
882 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
883 OperandType::TENSOR_INT32};
884 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800885 } else {
886 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000887 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800888 return ANEURALNETWORKS_BAD_DATA;
889 }
Michael Butler43953b82019-07-22 18:59:46 -0700890 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
891 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800892 outExpectedTypes);
893 }
Miao Wang137d2782018-03-06 15:03:14 -0800894 case ANEURALNETWORKS_DEPTH_TO_SPACE: {
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700895 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
896 LOG(ERROR) << "Invalid number of input operands (" << inputCount
897 << ", expected 3 or 2) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000898 << ", expected 1) for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800899 return ANEURALNETWORKS_BAD_DATA;
900 }
901 auto inputType = operands[inputIndexes[0]].type;
902 std::vector<OperandType> inExpectedTypes;
903 std::vector<OperandType> outExpectedTypes;
904 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000905 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700906 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800907 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000908 } else if (inputType == OperandType::TENSOR_FLOAT16) {
909 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
910 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
911 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800912 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000913 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700914 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800915 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Przemyslaw Szczepaniak8c915bb2019-11-22 14:59:43 +0000916 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
917 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
918 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
919 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800920 } else {
921 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000922 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800923 return ANEURALNETWORKS_BAD_DATA;
924 }
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700925 if (inputCount == 3) {
926 inExpectedTypes.push_back(OperandType::BOOL);
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000927 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100928 } else {
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000929 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Xusong Wangdbd3e9b2018-09-26 13:25:26 -0700930 }
Michael Butler43953b82019-07-22 18:59:46 -0700931 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
932 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800933 outExpectedTypes);
934 }
935 case ANEURALNETWORKS_SPACE_TO_DEPTH: {
Xusong Wangbb83d492018-09-26 13:38:43 -0700936 if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
937 LOG(ERROR) << "Invalid number of input operands (" << inputCount
938 << ", expected 3 or 2) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000939 << ", expected 1) for operation " << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800940 return ANEURALNETWORKS_BAD_DATA;
941 }
942 auto inputType = operands[inputIndexes[0]].type;
943 std::vector<OperandType> inExpectedTypes;
944 std::vector<OperandType> outExpectedTypes;
945 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000946 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700947 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800948 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Michael K. Sanders4b322522018-11-16 11:31:43 +0000949 } else if (inputType == OperandType::TENSOR_FLOAT16) {
950 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
951 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::INT32};
952 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Miao Wang137d2782018-03-06 15:03:14 -0800953 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sanders4b322522018-11-16 11:31:43 +0000954 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -0700955 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM, OperandType::INT32};
Miao Wang137d2782018-03-06 15:03:14 -0800956 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
Przemyslaw Szczepaniak8c915bb2019-11-22 14:59:43 +0000957 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
958 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
959 inExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED, OperandType::INT32};
960 outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
Miao Wang137d2782018-03-06 15:03:14 -0800961 } else {
962 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000963 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -0800964 return ANEURALNETWORKS_BAD_DATA;
965 }
Xusong Wangbb83d492018-09-26 13:38:43 -0700966 if (inputCount == 3) {
967 inExpectedTypes.push_back(OperandType::BOOL);
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000968 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Slava Shklyaev0a832b72018-10-15 14:57:36 +0100969 } else {
Slava Shklyaev42cb8d72018-11-07 10:44:21 +0000970 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Xusong Wangbb83d492018-09-26 13:38:43 -0700971 }
Michael Butler43953b82019-07-22 18:59:46 -0700972 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
973 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -0800974 outExpectedTypes);
975 }
Miao Wang137d2782018-03-06 15:03:14 -0800976 case ANEURALNETWORKS_EMBEDDING_LOOKUP: {
Michael Butlere538bb02018-03-26 14:24:49 -0700977 if (inputCount != 2 || outputCount != 1) {
978 logInvalidInOutNumber(2, 1);
979 return ANEURALNETWORKS_BAD_DATA;
980 }
981 auto inputType = operands[inputIndexes[1]].type;
Lev Proleev23776d52019-12-09 18:05:55 +0000982 if (inputType != OperandType::TENSOR_FLOAT16 &&
983 inputType != OperandType::TENSOR_FLOAT32 &&
Slava Shklyaevabe34fa2018-10-30 15:05:39 +0000984 inputType != OperandType::TENSOR_INT32 &&
Lev Proleev23776d52019-12-09 18:05:55 +0000985 inputType != OperandType::TENSOR_QUANT8_ASYMM &&
986 inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaevabe34fa2018-10-30 15:05:39 +0000987 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +0000988 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +0000989 return ANEURALNETWORKS_BAD_DATA;
990 }
Michael Butler43953b82019-07-22 18:59:46 -0700991 std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32, inputType};
Michael Butlere538bb02018-03-26 14:24:49 -0700992 std::vector<OperandType> outExpectedTypes = {inputType};
Lev Proleev23776d52019-12-09 18:05:55 +0000993 if (inputType == OperandType::TENSOR_FLOAT16 ||
994 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
995 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
996 } else if (inputType == OperandType::TENSOR_INT32 ||
997 inputType == OperandType::TENSOR_QUANT8_ASYMM) {
998 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
999 } else {
1000 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
1001 }
Michael Butler43953b82019-07-22 18:59:46 -07001002 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1003 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001004 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001005 }
1006 case ANEURALNETWORKS_HASHTABLE_LOOKUP: {
Michael Butlere538bb02018-03-26 14:24:49 -07001007 if (inputCount != 3 || outputCount != 2) {
1008 logInvalidInOutNumber(3, 2);
1009 return ANEURALNETWORKS_BAD_DATA;
1010 }
1011 auto inputType = operands[inputIndexes[2]].type;
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001012 if (inputType != OperandType::TENSOR_FLOAT32 &&
1013 inputType != OperandType::TENSOR_INT32 &&
1014 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
1015 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001016 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001017 return ANEURALNETWORKS_BAD_DATA;
1018 }
Michael Butlere538bb02018-03-26 14:24:49 -07001019 std::vector<OperandType> inExpectedTypes = {OperandType::TENSOR_INT32,
Michael Butler43953b82019-07-22 18:59:46 -07001020 OperandType::TENSOR_INT32, inputType};
Michael Butlere538bb02018-03-26 14:24:49 -07001021 std::vector<OperandType> outExpectedTypes = {inputType,
1022 OperandType::TENSOR_QUANT8_ASYMM};
Slava Shklyaev42cb8d72018-11-07 10:44:21 +00001023 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
Michael Butler43953b82019-07-22 18:59:46 -07001024 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1025 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001026 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001027 }
1028 case ANEURALNETWORKS_LSH_PROJECTION: {
Michael Butlere538bb02018-03-26 14:24:49 -07001029 if (inputCount != 4 || outputCount != 1) {
1030 logInvalidInOutNumber(4, 1);
1031 return ANEURALNETWORKS_BAD_DATA;
1032 }
1033 auto inputType = operands[inputIndexes[1]].type;
Michael K. Sanders18493a62018-12-07 14:30:34 +00001034 if (inputType != OperandType::TENSOR_FLOAT16 &&
1035 inputType != OperandType::TENSOR_FLOAT32 &&
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001036 inputType != OperandType::TENSOR_INT32 &&
1037 inputType != OperandType::TENSOR_QUANT8_ASYMM) {
1038 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001039 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001040 return ANEURALNETWORKS_BAD_DATA;
1041 }
Michael K. Sanders18493a62018-12-07 14:30:34 +00001042 auto hashType = operands[inputIndexes[0]].type;
1043 std::vector<OperandType> inExpectedTypes;
1044 if (hashType == OperandType::TENSOR_FLOAT16) {
1045 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1046 inExpectedTypes = {
1047 OperandType::TENSOR_FLOAT16,
1048 inputType,
1049 OperandType::TENSOR_FLOAT16,
1050 OperandType::INT32,
1051 };
1052 } else if (hashType == OperandType::TENSOR_FLOAT32) {
1053 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
1054 inExpectedTypes = {
1055 OperandType::TENSOR_FLOAT32,
1056 inputType,
1057 OperandType::TENSOR_FLOAT32,
1058 OperandType::INT32,
1059 };
1060 } else {
1061 LOG(ERROR) << "Unsupported hash tensor type for operation "
1062 << getOperationName(opType);
1063 return ANEURALNETWORKS_BAD_DATA;
1064 }
Michael Butlere538bb02018-03-26 14:24:49 -07001065 std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
Michael K. Sanders18493a62018-12-07 14:30:34 +00001066 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1067 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001068 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001069 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001070 case ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM: {
1071 std::vector<OperandType> inExpectedTypes;
1072 auto inputType = operands[inputIndexes[0]].type;
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001073 if (inputType != OperandType::TENSOR_FLOAT32 &&
1074 inputType != OperandType::TENSOR_FLOAT16) {
1075 LOG(ERROR) << "Unsupported input tensor type for operation "
1076 << getOperationName(opType);
1077 return ANEURALNETWORKS_BAD_DATA;
1078 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001079
1080 inExpectedTypes = {};
1081 for (int i = 0; i < 48; ++i) {
1082 inExpectedTypes.push_back(inputType);
1083 }
1084 inExpectedTypes.push_back(OperandType::INT32);
1085 inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
1086 ? OperandType::FLOAT32
1087 : OperandType::FLOAT16);
1088 inExpectedTypes.push_back(inputType == OperandType::TENSOR_FLOAT32
1089 ? OperandType::FLOAT32
1090 : OperandType::FLOAT16);
1091 inExpectedTypes.push_back(OperandType::BOOL);
1092 inExpectedTypes.push_back(OperandType::BOOL);
Viet Dang985c2142019-03-19 23:19:27 +00001093 for (int i = 0; i < 8; ++i) {
1094 inExpectedTypes.push_back(inputType);
1095 }
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001096
Lev Proleev42f87922020-01-14 15:40:39 +00001097 const uint32_t kNumOutputs = 2;
1098 const uint32_t kNumOutputsMerged = 1;
1099 const uint32_t kNumOutputsWithState = 6;
1100 const uint32_t kNumOutputsMergedWithState = 5;
1101
1102 if (inputCount != 61 ||
1103 (outputCount != kNumOutputs && outputCount != kNumOutputsMerged &&
1104 outputCount != kNumOutputsWithState &&
1105 outputCount != kNumOutputsMergedWithState)) {
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001106 LOG(ERROR) << "Invalid number of input operands (" << inputCount
Viet Dang4c927942019-03-27 12:00:18 +00001107 << ", expected 61) or output operands (" << outputCount
Lev Proleev42f87922020-01-14 15:40:39 +00001108 << ", expected 1, 2, 5 or 6) for operation " << getOperationName(opType);
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001109 return ANEURALNETWORKS_BAD_DATA;
1110 }
Lev Proleev42f87922020-01-14 15:40:39 +00001111 HalVersion minSupportedHalVersion = HalVersion::V1_2;
1112 if (outputCount == kNumOutputsWithState || outputCount == kNumOutputsMergedWithState) {
1113 minSupportedHalVersion = HalVersion::V1_3;
1114 }
1115 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, minSupportedHalVersion));
1116 std::vector<OperandType> outExpectedTypes(outputCount, inputType);
Viet Dang4c927942019-03-27 12:00:18 +00001117 auto status = validateOperationOperandTypes(operands, inputCount, inputIndexes,
1118 inExpectedTypes, outputCount, outputIndexes,
1119 outExpectedTypes);
Viet Dang4c927942019-03-27 12:00:18 +00001120 return status;
Michael K. Sanders2db63b72019-01-22 17:47:45 +00001121 }
        case ANEURALNETWORKS_LSTM: {
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // The type of input 0 dictates the expected type of every other tensor
            // operand; only float32 and float16 variants exist.
            auto inputType = operands[inputIndexes[0]].type;
            if (inputType != OperandType::TENSOR_FLOAT32 &&
                inputType != OperandType::TENSOR_FLOAT16) {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }

            // Inputs 0-19: tensors matching input 0's type; input 20: INT32 scalar.
            inExpectedTypes = {inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               inputType, inputType, inputType, inputType, inputType,
                               OperandType::INT32};
            // Inputs 21-22: float scalars whose precision follows the tensor type.
            // The float16 flavor of the op only exists from HAL 1.2 onward.
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes.push_back(OperandType::FLOAT32);
                inExpectedTypes.push_back(OperandType::FLOAT32);
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes.push_back(OperandType::FLOAT16);
                inExpectedTypes.push_back(OperandType::FLOAT16);
            }

            outExpectedTypes = {inputType, inputType, inputType, inputType};
            // 23 inputs is the original HAL 1.0 signature; the 27-input form appends
            // four extra tensors (same type as input 0) and requires HAL 1.2.
            if (inputCount == 23 && outputCount == 4) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
            } else if (inputCount == 27 && outputCount == 4) {
                for (int i = 0; i < 4; ++i) {
                    inExpectedTypes.push_back(inputType);
                }
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 23 or 27) or output operands (" << outputCount
                           << ", expected 4) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_QUANTIZED_16BIT_LSTM: {
            // Fixed signature: 15 inputs, 2 outputs. Introduced in HAL 1.2.
            if (inputCount != 15 || outputCount != 2) {
                logInvalidInOutNumber(15, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            // Inputs 0-8: quant8 asymm tensors; 9-12: int32 tensors; 13: quant16
            // symm tensor; 14: quant8 asymm tensor.
            std::vector<OperandType> inExpectedTypes = {
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_QUANT8_ASYMM,
                    OperandType::TENSOR_QUANT8_ASYMM, OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_INT32,
                    OperandType::TENSOR_INT32,        OperandType::TENSOR_QUANT16_SYMM,
                    OperandType::TENSOR_QUANT8_ASYMM};
            // Output 0 is quant16 symm, output 1 quant8 asymm.
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_QUANT16_SYMM,
                                                         OperandType::TENSOR_QUANT8_ASYMM};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_RANDOM_MULTINOMIAL: {
            // Fixed signature: 3 inputs, 1 output. Introduced in HAL 1.2.
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            OperandType inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            // Input 0 may be float32 or float16; input 1 is an INT32 scalar and
            // input 2 an int32 tensor.
            if (inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        inputType,
                        OperandType::INT32,
                        OperandType::TENSOR_INT32,
                };
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // The output is always an int32 tensor, regardless of input precision.
            std::vector<OperandType> outExpectedTypes = {OperandType::TENSOR_INT32};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
Miao Wang137d2782018-03-06 15:03:14 -08001211 case ANEURALNETWORKS_RNN: {
Michael Butlere538bb02018-03-26 14:24:49 -07001212 if (inputCount != 6 || outputCount != 2) {
1213 logInvalidInOutNumber(6, 2);
1214 return ANEURALNETWORKS_BAD_DATA;
1215 }
Michael K. Sandersca549302018-12-06 16:29:07 +00001216 OperandType inputType = operands[inputIndexes[0]].type;
1217 std::vector<OperandType> inExpectedTypes;
1218 std::vector<OperandType> outExpectedTypes;
1219 if (inputType == OperandType::TENSOR_FLOAT32) {
1220 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
1221 inExpectedTypes = {
1222 OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1223 OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1224 OperandType::TENSOR_FLOAT32, OperandType::INT32,
1225 };
1226 outExpectedTypes = {
1227 OperandType::TENSOR_FLOAT32,
1228 OperandType::TENSOR_FLOAT32,
1229 };
1230 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1231 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1232 inExpectedTypes = {
1233 OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1234 OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1235 OperandType::TENSOR_FLOAT16, OperandType::INT32,
1236 };
1237 outExpectedTypes = {
1238 OperandType::TENSOR_FLOAT16,
1239 OperandType::TENSOR_FLOAT16,
1240 };
1241 } else {
1242 LOG(ERROR) << "Unsupported input tensor type for operation "
1243 << getOperationName(opType);
1244 return ANEURALNETWORKS_BAD_DATA;
1245 }
1246 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1247 inExpectedTypes, outputCount, outputIndexes,
Michael Butlere538bb02018-03-26 14:24:49 -07001248 outExpectedTypes);
Miao Wang137d2782018-03-06 15:03:14 -08001249 }
        case ANEURALNETWORKS_SVDF: {
            // Fixed signature: 7 inputs, 2 outputs.
            if (inputCount != 7 || outputCount != 2) {
                logInvalidInOutNumber(7, 2);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // float32 is supported since HAL 1.0; float16 requires HAL 1.2.
            OperandType inputType = operands[inputIndexes[0]].type;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));

            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Inputs 0-4: tensors of the input type; inputs 5-6: INT32 scalars.
            std::vector<OperandType> inExpectedTypes = {
                    inputType, inputType, inputType, inputType,
                    inputType, OperandType::INT32, OperandType::INT32,
            };
            std::vector<OperandType> outExpectedTypes = {inputType, inputType};
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_BATCH_TO_SPACE_ND: {
            // 2 mandatory inputs plus an optional third scalar (see below); 1 output.
            if ((inputCount != 3 && inputCount != 2) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 3 or 2) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // Output type always mirrors the input tensor type. float16 requires
            // HAL 1.2 and signed quant8 requires HAL 1.3; float32/quant8 carry no
            // extra type-based constraint beyond the count-based check below.
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Optional input 2 is a BOOL scalar (presumably a data-layout flag --
            // confirm against the NNAPI spec) and is only valid from HAL 1.2;
            // the 2-input form is the original HAL 1.1 signature.
            if (inputCount == 3) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_SPACE_TO_BATCH_ND: {
            // 3 mandatory inputs plus an optional fourth scalar (see below); 1 output.
            if ((inputCount != 4 && inputCount != 3) || outputCount != 1) {
                LOG(ERROR) << "Invalid number of input operands (" << inputCount
                           << ", expected 4 or 3) or output operands (" << outputCount
                           << ", expected 1) for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // Output type always mirrors the input tensor type. float16 requires
            // HAL 1.2 and signed quant8 requires HAL 1.3.
            if (inputType == OperandType::TENSOR_FLOAT32) {
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
                // A quant8 input with a nonzero zero point was only made legal for
                // this op in HAL 1.2; zero-zero-point quant8 dates back to 1.1.
                if (operands[inputIndexes[0]].zeroPoint != 0) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                inExpectedTypes = {
                        OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
                        OperandType::TENSOR_INT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_QUANT8_ASYMM_SIGNED};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // Optional input 3 is a BOOL scalar (presumably a data-layout flag --
            // confirm against the NNAPI spec), valid only from HAL 1.2; the
            // 3-input form is the original HAL 1.1 signature.
            if (inputCount == 4) {
                inExpectedTypes.push_back(OperandType::BOOL);
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD: {
            // Fixed signature: 2 inputs (tensor + paddings), 1 output.
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            if (inputType == OperandType::TENSOR_FLOAT32) {
                // float32 PAD exists since HAL 1.1.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                // float16 was added in HAL 1.2.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    // Signed quant8 only exists from HAL 1.3.
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    // Unsigned quant8 with a zero zero point is a HAL 1.1 feature;
                    // a nonzero zero point requires HAL 1.2.
                    if (operands[inputIndexes[0]].zeroPoint == 0) {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_1));
                    } else {
                        NN_RETURN_IF_ERROR(
                                validateHalVersion(opType, halVersion, HalVersion::V1_2));
                    }
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                };
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_PAD_V2: {
            // Fixed signature: 3 inputs (tensor, paddings, pad value), 1 output.
            if (inputCount != 3 || outputCount != 1) {
                logInvalidInOutNumber(3, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // The scalar pad value (input 2) follows the tensor's precision:
            // FLOAT32, FLOAT16, or INT32 for the quantized variants.
            if (inputType == OperandType::TENSOR_FLOAT32) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT32,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT32,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT32};
            } else if (inputType == OperandType::TENSOR_FLOAT16) {
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                inExpectedTypes = {
                        OperandType::TENSOR_FLOAT16,
                        OperandType::TENSOR_INT32,
                        OperandType::FLOAT16,
                };
                outExpectedTypes = {OperandType::TENSOR_FLOAT16};
            } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                // Signed quant8 requires HAL 1.3; unsigned requires HAL 1.2.
                if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
                } else {
                    NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                }
                inExpectedTypes = {
                        inputType,
                        OperandType::TENSOR_INT32,
                        OperandType::INT32,
                };  // TODO(b/116699425): Make it UINT8.
                outExpectedTypes = {inputType};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
        case ANEURALNETWORKS_CAST: {
            // Fixed signature: 1 input, 1 output.
            if (inputCount != 1 || outputCount != 1) {
                logInvalidInOutNumber(1, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            auto outputType = operands[outputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // Two regimes:
            //  1) HAL 1.2: any-to-any cast within {float16, float32, int32,
            //     quant8 asymm}.
            //  2) HAL 1.3: the remaining tensor types are accepted, but only as
            //     an identity cast (output type must equal input type).
            if ((inputType == OperandType::TENSOR_FLOAT16 ||
                 inputType == OperandType::TENSOR_FLOAT32 ||
                 inputType == OperandType::TENSOR_INT32 ||
                 inputType == OperandType::TENSOR_QUANT8_ASYMM) &&
                (outputType == OperandType::TENSOR_FLOAT16 ||
                 outputType == OperandType::TENSOR_FLOAT32 ||
                 outputType == OperandType::TENSOR_INT32 ||
                 outputType == OperandType::TENSOR_QUANT8_ASYMM)) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {outputType};
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            } else if (inputType == OperandType::TENSOR_BOOL8 ||
                       inputType == OperandType::TENSOR_QUANT16_ASYMM ||
                       inputType == OperandType::TENSOR_QUANT16_SYMM ||
                       inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
                       inputType == OperandType::TENSOR_QUANT8_SYMM) {
                inExpectedTypes = {inputType};
                outExpectedTypes = {inputType};  // Only identity CAST is supported.
                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
            } else {
                LOG(ERROR) << "Unsupported data type for operation " << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
Miao Wang137d2782018-03-06 15:03:14 -08001515 case ANEURALNETWORKS_MEAN: {
1516 if (inputCount != 3 || outputCount != 1) {
1517 logInvalidInOutNumber(3, 1);
1518 return ANEURALNETWORKS_BAD_DATA;
1519 }
1520 auto inputType = operands[inputIndexes[0]].type;
Miao Wang137d2782018-03-06 15:03:14 -08001521 if (inputType == OperandType::TENSOR_FLOAT32) {
Michael K. Sandersab24a3a2018-11-21 15:07:12 +00001522 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
Michael K. Sandersab24a3a2018-11-21 15:07:12 +00001523 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1524 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
Miao Wang137d2782018-03-06 15:03:14 -08001525 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
Michael K. Sandersab24a3a2018-11-21 15:07:12 +00001526 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_1));
Lev Proleev1ee05fa2019-11-25 16:01:01 +00001527 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1528 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
Miao Wang137d2782018-03-06 15:03:14 -08001529 } else {
1530 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001531 << getOperationName(opType);
Miao Wang137d2782018-03-06 15:03:14 -08001532 return ANEURALNETWORKS_BAD_DATA;
1533 }
Lev Proleev1ee05fa2019-11-25 16:01:01 +00001534 std::vector<OperandType> inExpectedTypes = {inputType, OperandType::TENSOR_INT32,
1535 OperandType::INT32};
1536 std::vector<OperandType> outExpectedTypes = {inputType};
Michael Butler43953b82019-07-22 18:59:46 -07001537 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1538 inExpectedTypes, outputCount, outputIndexes,
Miao Wang137d2782018-03-06 15:03:14 -08001539 outExpectedTypes);
1540 }
        case ANEURALNETWORKS_ARGMAX:
        case ANEURALNETWORKS_ARGMIN: {
            // Fixed signature: 2 inputs (tensor, axis), 1 output.
            if (inputCount != 2 || outputCount != 1) {
                logInvalidInOutNumber(2, 1);
                return ANEURALNETWORKS_BAD_DATA;
            }
            auto inputType = operands[inputIndexes[0]].type;
            std::vector<OperandType> inExpectedTypes;
            std::vector<OperandType> outExpectedTypes;
            // The output is always an int32 index tensor, regardless of input type.
            if (inputType == OperandType::TENSOR_FLOAT16 ||
                inputType == OperandType::TENSOR_FLOAT32 ||
                inputType == OperandType::TENSOR_INT32 ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM ||
                inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                inExpectedTypes = {inputType, OperandType::INT32};
                outExpectedTypes = {OperandType::TENSOR_INT32};
            } else {
                LOG(ERROR) << "Unsupported input tensor type for operation "
                           << getOperationName(opType);
                return ANEURALNETWORKS_BAD_DATA;
            }
            // NOTE(review): TENSOR_QUANT8_ASYMM_SIGNED is accepted here with only a
            // V1_2 check, while sibling ops (e.g. SPLIT, MAXIMUM) require V1_3 for
            // that type -- verify this is intentional.
            NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
            return validateOperationOperandTypes(operands, inputCount, inputIndexes,
                                                 inExpectedTypes, outputCount, outputIndexes,
                                                 outExpectedTypes);
        }
1567 case ANEURALNETWORKS_EXPAND_DIMS: {
1568 if (inputCount != 2 || outputCount != 1) {
1569 logInvalidInOutNumber(2, 1);
1570 return ANEURALNETWORKS_BAD_DATA;
1571 }
1572 auto inputType = operands[inputIndexes[0]].type;
1573 std::vector<OperandType> inExpectedTypes;
1574 std::vector<OperandType> outExpectedTypes;
Lev Proleeve574bf12018-12-01 00:29:59 +00001575 if (inputType == OperandType::TENSOR_FLOAT16 ||
1576 inputType == OperandType::TENSOR_FLOAT32 ||
Slava Shklyaevb629ef12018-09-27 15:13:47 +01001577 inputType == OperandType::TENSOR_INT32 ||
Przemyslaw Szczepaniakd413c3b2019-12-10 13:42:05 +00001578 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1579 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaevb629ef12018-09-27 15:13:47 +01001580 inExpectedTypes = {inputType, OperandType::INT32};
1581 outExpectedTypes = {inputType};
1582 } else {
1583 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001584 << getOperationName(opType);
Slava Shklyaevb629ef12018-09-27 15:13:47 +01001585 return ANEURALNETWORKS_BAD_DATA;
1586 }
Przemyslaw Szczepaniakd413c3b2019-12-10 13:42:05 +00001587 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1588 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1589 } else {
1590 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1591 }
Slava Shklyaev818d7fb2018-10-16 16:06:27 +01001592 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1593 inExpectedTypes, outputCount, outputIndexes,
1594 outExpectedTypes);
1595 }
Lev Proleevdca98d02018-09-13 16:17:58 +01001596 case ANEURALNETWORKS_SPLIT: {
1597 if (inputCount != 3) {
1598 LOG(ERROR) << "Invalid number of input operands (" << inputCount << ", expected 3)"
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001599 << getOperationName(opType);
Lev Proleevdca98d02018-09-13 16:17:58 +01001600 return ANEURALNETWORKS_BAD_DATA;
1601 }
1602 auto inputType = operands[inputIndexes[0]].type;
Lev Proleev4dd179e2018-11-15 15:14:12 +00001603 if (inputType != OperandType::TENSOR_FLOAT16 &&
1604 inputType != OperandType::TENSOR_FLOAT32 &&
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001605 inputType != OperandType::TENSOR_INT32 &&
Przemyslaw Szczepaniak88929bc2019-11-25 10:43:57 +00001606 inputType != OperandType::TENSOR_QUANT8_ASYMM &&
1607 inputType != OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001608 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001609 << getOperationName(opType);
Slava Shklyaevabe34fa2018-10-30 15:05:39 +00001610 return ANEURALNETWORKS_BAD_DATA;
1611 }
Przemyslaw Szczepaniak88929bc2019-11-25 10:43:57 +00001612 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1613 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1614 } else {
1615 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1616 }
Lev Proleevdca98d02018-09-13 16:17:58 +01001617 std::vector<OperandType> inExpectedTypes = {inputType, OperandType::INT32,
1618 OperandType::INT32};
1619 std::vector<OperandType> outExpectedTypes(outputCount, inputType);
1620 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1621 inExpectedTypes, outputCount, outputIndexes,
1622 outExpectedTypes);
1623 }
Slava Shklyaev18767482018-10-29 17:31:19 +00001624 case ANEURALNETWORKS_MAXIMUM:
1625 case ANEURALNETWORKS_MINIMUM: {
1626 if (inputCount != 2 || outputCount != 1) {
1627 logInvalidInOutNumber(2, 1);
1628 return ANEURALNETWORKS_BAD_DATA;
1629 }
1630 std::vector<OperandType> inExpectedTypes;
1631 std::vector<OperandType> outExpectedTypes;
1632 OperandType inputType = operands[inputIndexes[0]].type;
Slava Shklyaev53d76842018-11-05 12:22:56 +00001633 if (inputType == OperandType::TENSOR_FLOAT16 ||
1634 inputType == OperandType::TENSOR_FLOAT32 ||
Slava Shklyaev18767482018-10-29 17:31:19 +00001635 inputType == OperandType::TENSOR_INT32 ||
Przemyslaw Szczepaniak743b2152019-11-28 11:25:21 +00001636 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1637 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaev18767482018-10-29 17:31:19 +00001638 inExpectedTypes = {inputType, inputType};
1639 outExpectedTypes = {inputType};
1640 } else {
1641 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001642 << getOperationName(opType);
Slava Shklyaev18767482018-10-29 17:31:19 +00001643 return ANEURALNETWORKS_BAD_DATA;
1644 }
Przemyslaw Szczepaniaka7f10cd2019-11-28 13:27:49 +00001645 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1646 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1647 } else {
1648 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1649 }
Slava Shklyaev18767482018-10-29 17:31:19 +00001650 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1651 inExpectedTypes, outputCount, outputIndexes,
1652 outExpectedTypes);
1653 }
Xusong Wang85a0eb72018-08-17 15:38:32 -07001654 case ANEURALNETWORKS_GROUPED_CONV_2D: {
Xusong Wanga29428d2018-10-15 17:40:16 -07001655 if ((inputCount != 12 && inputCount != 9) || outputCount != 1) {
Xusong Wang85a0eb72018-08-17 15:38:32 -07001656 LOG(ERROR) << "Invalid number of input operands (" << inputCount
Xusong Wanga29428d2018-10-15 17:40:16 -07001657 << ", expected 12 or 9) or output operands (" << outputCount
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001658 << ", expected 1) for operation " << getOperationName(opType);
Xusong Wang85a0eb72018-08-17 15:38:32 -07001659 return ANEURALNETWORKS_BAD_DATA;
1660 }
1661 auto inputType = operands[inputIndexes[0]].type;
Przemyslaw Szczepaniak36298242018-12-28 11:52:32 +00001662 auto filterType = operands[inputIndexes[1]].type;
Xusong Wang85a0eb72018-08-17 15:38:32 -07001663 std::vector<OperandType> inExpectedTypes;
1664 std::vector<OperandType> outExpectedTypes;
1665 if (inputType == OperandType::TENSOR_FLOAT32) {
1666 inExpectedTypes = {OperandType::TENSOR_FLOAT32, OperandType::TENSOR_FLOAT32,
1667 OperandType::TENSOR_FLOAT32, OperandType::INT32,
1668 OperandType::INT32, OperandType::INT32,
1669 OperandType::INT32, OperandType::INT32};
1670 outExpectedTypes = {OperandType::TENSOR_FLOAT32};
Lev Proleeve5a548b2018-11-30 23:27:15 +00001671 } else if (inputType == OperandType::TENSOR_FLOAT16) {
1672 inExpectedTypes = {OperandType::TENSOR_FLOAT16, OperandType::TENSOR_FLOAT16,
1673 OperandType::TENSOR_FLOAT16, OperandType::INT32,
1674 OperandType::INT32, OperandType::INT32,
1675 OperandType::INT32, OperandType::INT32};
1676 outExpectedTypes = {OperandType::TENSOR_FLOAT16};
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001677 } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1678 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1679 if (filterType != inputType &&
Przemyslaw Szczepaniak36298242018-12-28 11:52:32 +00001680 filterType != OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
1681 LOG(ERROR) << "Unsupported filter tensor type for operation "
1682 << getOperationName(opType);
1683 return ANEURALNETWORKS_BAD_DATA;
1684 }
1685
1686 if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL &&
1687 operands[inputIndexes[1]].extraParams.channelQuant().channelDim != 0) {
1688 LOG(ERROR) << "Unsupported filter tensor channel dimension for operation "
1689 << getOperationName(opType);
1690 return ANEURALNETWORKS_BAD_DATA;
1691 }
1692
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001693 inExpectedTypes = {
1694 inputType, filterType, OperandType::TENSOR_INT32,
1695 OperandType::INT32, OperandType::INT32, OperandType::INT32,
1696 OperandType::INT32, OperandType::INT32};
1697 outExpectedTypes = {inputType};
Xusong Wang85a0eb72018-08-17 15:38:32 -07001698 } else {
1699 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001700 << getOperationName(opType);
Xusong Wang85a0eb72018-08-17 15:38:32 -07001701 return ANEURALNETWORKS_BAD_DATA;
1702 }
1703
Xusong Wanga29428d2018-10-15 17:40:16 -07001704 if (inputCount == 12) {
Xusong Wang85a0eb72018-08-17 15:38:32 -07001705 std::vector<OperandType> explicitScalarTypes(3, OperandType::INT32);
1706 inExpectedTypes.insert(inExpectedTypes.end(), explicitScalarTypes.begin(),
1707 explicitScalarTypes.end());
1708 }
Xusong Wanga29428d2018-10-15 17:40:16 -07001709 inExpectedTypes.push_back(OperandType::BOOL);
Przemyslaw Szczepaniakcab04432019-11-25 13:52:04 +00001710 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1711 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1712 } else {
1713 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1714 }
Xusong Wang85a0eb72018-08-17 15:38:32 -07001715 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1716 inExpectedTypes, outputCount, outputIndexes,
1717 outExpectedTypes);
1718 }
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001719 case ANEURALNETWORKS_TILE: {
1720 if (inputCount != 2 || outputCount != 1) {
1721 logInvalidInOutNumber(2, 1);
1722 return ANEURALNETWORKS_BAD_DATA;
1723 }
1724 auto inputType = operands[inputIndexes[0]].type;
1725 std::vector<OperandType> inExpectedTypes;
1726 std::vector<OperandType> outExpectedTypes;
Slava Shklyaev815cd4e2018-11-05 11:25:38 +00001727 if (inputType == OperandType::TENSOR_FLOAT16 ||
1728 inputType == OperandType::TENSOR_FLOAT32 ||
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001729 inputType == OperandType::TENSOR_INT32 ||
Przemyslaw Szczepaniak54cab802019-11-21 19:37:50 +00001730 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1731 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001732 inExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1733 outExpectedTypes = {inputType};
1734 } else {
1735 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001736 << getOperationName(opType);
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001737 return ANEURALNETWORKS_BAD_DATA;
1738 }
Przemyslaw Szczepaniak54cab802019-11-21 19:37:50 +00001739 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1740 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1741 } else {
1742 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1743 }
Slava Shklyaev0e72ef52018-10-02 16:35:32 +01001744 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1745 inExpectedTypes, outputCount, outputIndexes,
1746 outExpectedTypes);
1747 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001748 case ANEURALNETWORKS_POW: {
1749 if (inputCount != 2 || outputCount != 1) {
1750 logInvalidInOutNumber(2, 1);
1751 return ANEURALNETWORKS_BAD_DATA;
1752 }
Lev Proleev21fbf2b2018-12-23 16:54:26 +00001753 auto inputType = operands[inputIndexes[0]].type;
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001754 std::vector<OperandType> inExpectedTypes;
1755 std::vector<OperandType> outExpectedTypes;
Lev Proleev21fbf2b2018-12-23 16:54:26 +00001756 if (inputType == OperandType::TENSOR_FLOAT16 ||
1757 inputType == OperandType::TENSOR_FLOAT32) {
1758 inExpectedTypes = {inputType, inputType};
1759 outExpectedTypes = {inputType};
1760 } else {
1761 LOG(ERROR) << "Unsupported input tensor type for operation "
1762 << getOperationName(opType);
1763 return ANEURALNETWORKS_BAD_DATA;
1764 }
Przemyslaw Szczepaniak743b2152019-11-28 11:25:21 +00001765 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1766 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1767 } else {
1768 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1769 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001770 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1771 inExpectedTypes, outputCount, outputIndexes,
1772 outExpectedTypes);
1773 }
Lev Proleev78045492018-11-05 15:51:48 +00001774 case ANEURALNETWORKS_TOPK_V2: {
1775 if (inputCount != 2 || outputCount != 2) {
1776 logInvalidInOutNumber(2, 1);
1777 return ANEURALNETWORKS_BAD_DATA;
1778 }
1779 std::vector<OperandType> inExpectedTypes;
1780 std::vector<OperandType> outExpectedTypes;
1781 OperandType inputType = operands[inputIndexes[0]].type;
Lev Proleevfdc60122018-11-15 11:51:08 +00001782 if (inputType == OperandType::TENSOR_FLOAT16 ||
1783 inputType == OperandType::TENSOR_FLOAT32 ||
Lev Proleev78045492018-11-05 15:51:48 +00001784 inputType == OperandType::TENSOR_INT32 ||
Przemyslaw Szczepaniak5a128c62019-11-25 11:50:55 +00001785 inputType == OperandType::TENSOR_QUANT8_ASYMM ||
1786 inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
Lev Proleev78045492018-11-05 15:51:48 +00001787 inExpectedTypes = {inputType, OperandType::INT32};
1788 outExpectedTypes = {inputType, OperandType::TENSOR_INT32};
1789 } else {
1790 LOG(ERROR) << "Unsupported input tensor type for operation "
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001791 << getOperationName(opType);
Lev Proleev78045492018-11-05 15:51:48 +00001792 return ANEURALNETWORKS_BAD_DATA;
1793 }
Przemyslaw Szczepaniak5a128c62019-11-25 11:50:55 +00001794 if (inputType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
1795 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1796 } else {
1797 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
1798 }
Lev Proleev78045492018-11-05 15:51:48 +00001799 return validateOperationOperandTypes(operands, inputCount, inputIndexes,
1800 inExpectedTypes, outputCount, outputIndexes,
1801 outExpectedTypes);
1802 }
Slava Shklyaev2e9f87e2019-12-13 16:43:26 +00001803 case ANEURALNETWORKS_IF: {
1804 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1805 return validateIfOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1806 operands, helper)
1807 ? ANEURALNETWORKS_NO_ERROR
1808 : ANEURALNETWORKS_BAD_DATA;
1809 }
1810 case ANEURALNETWORKS_WHILE: {
1811 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_3));
1812 return validateWhileOperation(inputCount, inputIndexes, outputCount, outputIndexes,
1813 operands, helper)
1814 ? ANEURALNETWORKS_NO_ERROR
1815 : ANEURALNETWORKS_BAD_DATA;
1816 }
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001817 default: {
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001818 const OperationRegistration* operationRegistration =
Slava Shklyaev44b23b42019-01-22 14:23:23 +00001819 BuiltinOperationResolver::get()->findOperation(
1820 static_cast<OperationType>(opType));
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001821 if (operationRegistration == nullptr) {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +00001822 if (0 <= opType && opType < kNumberOfOperationTypes) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001823 LOG(ERROR) << getOperationName(opType) << " not registered";
Przemyslaw Szczepaniakbb10cb42018-11-23 13:44:17 +00001824 } else {
Slava Shklyaevbecd62c2019-01-23 16:09:51 +00001825 LOG(ERROR) << "Operation type " << opType << " out of the range [0, "
1826 << kNumberOfOperationTypes << ")";
Przemyslaw Szczepaniakbb10cb42018-11-23 13:44:17 +00001827 }
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001828 return ANEURALNETWORKS_UNEXPECTED_NULL;
1829 }
1830 if (operationRegistration->validate == nullptr) {
Slava Shklyaevde9bfd22018-11-27 00:20:32 +00001831 LOG(ERROR) << "Incomplete operation registration: " << getOperationName(opType);
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001832 return ANEURALNETWORKS_UNEXPECTED_NULL;
1833 }
Slava Shklyaev432890e2019-09-30 16:04:43 +01001834 OperationValidationContext context(operationRegistration->name, inputCount,
1835 inputIndexes, outputCount, outputIndexes,
Slava Shklyaev3b1ea252018-11-06 15:32:44 +00001836 operands.data(), halVersion);
Slava Shklyaeve9b2e372019-04-23 10:49:53 +01001837 if (!operationRegistration->validate(&context)) {
1838 LOG(ERROR) << "Validation failed for operation " << getOperationName(opType);
1839 return ANEURALNETWORKS_BAD_DATA;
1840 }
1841 return ANEURALNETWORKS_NO_ERROR;
Lev Proleev4dc1ce82018-10-18 14:39:07 +01001842 }
Miao Wang137d2782018-03-06 15:03:14 -08001843 }
1844}
1845
David Gross07ed4d52018-04-06 14:52:52 -07001846ErrorStatus convertResultCodeToErrorStatus(int resultCode) {
1847 switch (resultCode) {
1848 case ANEURALNETWORKS_NO_ERROR:
1849 return ErrorStatus::NONE;
1850
1851 case ANEURALNETWORKS_BAD_DATA:
1852 case ANEURALNETWORKS_UNEXPECTED_NULL:
1853 return ErrorStatus::INVALID_ARGUMENT;
1854
Xusong Wange5be7ce2018-11-07 15:03:29 -08001855 case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
1856 return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
1857
Miao Wange5d644a2019-01-23 14:17:14 -08001858 case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
1859 return ErrorStatus::DEVICE_UNAVAILABLE;
1860
David Gross07ed4d52018-04-06 14:52:52 -07001861 case ANEURALNETWORKS_BAD_STATE:
1862 case ANEURALNETWORKS_INCOMPLETE:
1863 case ANEURALNETWORKS_OP_FAILED:
1864 case ANEURALNETWORKS_OUT_OF_MEMORY:
Miao Wang2dcdbd92018-04-23 10:36:24 -07001865 case ANEURALNETWORKS_UNMAPPABLE:
Michael Butlerf690d312019-12-12 16:25:03 -08001866 case ANEURALNETWORKS_DEAD_OBJECT:
David Gross07ed4d52018-04-06 14:52:52 -07001867 return ErrorStatus::GENERAL_FAILURE;
Michael Butlerf690d312019-12-12 16:25:03 -08001868
1869 case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
1870 return ErrorStatus::MISSED_DEADLINE_TRANSIENT;
1871 case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
1872 return ErrorStatus::MISSED_DEADLINE_PERSISTENT;
1873 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
1874 return ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT;
1875 case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
1876 return ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT;
David Gross07ed4d52018-04-06 14:52:52 -07001877 }
Michael Butlerf690d312019-12-12 16:25:03 -08001878 LOG(ERROR) << "Unknown result code " << resultCode << " mapped to ErrorStatus::GENERAL_FAILURE";
1879 return ErrorStatus::GENERAL_FAILURE;
David Gross07ed4d52018-04-06 14:52:52 -07001880}
1881
// Maps a HAL ErrorStatus onto the public NNAPI result codes.
// There is deliberately no default case: enumerating every ErrorStatus lets
// the compiler flag a newly added status, and out-of-enum values fall through
// to the log below.
int convertErrorStatusToResultCode(ErrorStatus status) {
    switch (status) {
        case ErrorStatus::NONE:
            return ANEURALNETWORKS_NO_ERROR;
        case ErrorStatus::DEVICE_UNAVAILABLE:
            return ANEURALNETWORKS_UNAVAILABLE_DEVICE;
        case ErrorStatus::GENERAL_FAILURE:
            // Not a bijection with convertResultCodeToErrorStatus: several
            // result codes collapse into GENERAL_FAILURE, which maps back to
            // OP_FAILED only.
            return ANEURALNETWORKS_OP_FAILED;
        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE;
        case ErrorStatus::INVALID_ARGUMENT:
            return ANEURALNETWORKS_BAD_DATA;
        case ErrorStatus::MISSED_DEADLINE_TRANSIENT:
            return ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT;
        case ErrorStatus::MISSED_DEADLINE_PERSISTENT:
            return ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT;
        case ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT;
        case ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
            return ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT;
    }
    LOG(ERROR) << "Unknown ErrorStatus " << toString(status)
               << " mapped to ANEURALNETWORKS_OP_FAILED";
    return ANEURALNETWORKS_OP_FAILED;
}
1907
Michael Butler4fe318d2019-08-17 17:40:29 -07001908std::tuple<int, std::vector<OutputShape>, Timing> getExecutionResult(
1909 ErrorStatus status, std::vector<OutputShape> outputShapes, Timing timing) {
1910 constexpr Timing kNoTiming = {std::numeric_limits<uint64_t>::max(),
1911 std::numeric_limits<uint64_t>::max()};
1912 const int n = convertErrorStatusToResultCode(status);
1913 if (status != ErrorStatus::NONE && status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE &&
1914 !outputShapes.empty()) {
1915 LOG(ERROR) << "The driver returned OutputShapes when it shouldn't.";
1916 outputShapes.clear();
1917 }
1918 if (status != ErrorStatus::NONE && timing != kNoTiming) {
1919 LOG(ERROR) << "The driver returned Timing when it shouldn't.";
1920 timing = kNoTiming;
1921 }
1922 return {n, std::move(outputShapes), timing};
1923}
1924
Xusong Wangc4b4cca2019-11-27 11:44:03 -08001925std::optional<std::vector<uint32_t>> combineDimensions(const std::vector<uint32_t>& lhs,
1926 const std::vector<uint32_t>& rhs) {
1927 if (rhs.empty()) return lhs;
1928 if (lhs.empty()) return rhs;
1929 if (lhs.size() != rhs.size()) {
1930 LOG(ERROR) << "Incompatible ranks: " << toString(lhs) << " and " << toString(rhs);
1931 return std::nullopt;
1932 }
1933 std::vector<uint32_t> combined = lhs;
1934 for (uint32_t i = 0; i < lhs.size(); i++) {
1935 if (lhs[i] == 0) {
1936 combined[i] = rhs[i];
1937 } else if (rhs[i] != 0 && lhs[i] != rhs[i]) {
1938 LOG(ERROR) << "Incompatible dimensions: " << toString(lhs) << " and " << toString(rhs);
1939 return std::nullopt;
1940 }
1941 }
1942 return combined;
1943}
1944
Colin Crossbd7f9c42019-10-10 22:58:13 +00001945// Capabilities::operandPerformance utilities.
1946// The field Capabilities::operandPerformance is a vector sorted by the field
1947// Capabilities::OperandPerformance::type.
David Gross5dd79af2019-03-18 15:33:53 -07001948
Colin Crossbd7f9c42019-10-10 22:58:13 +00001949template <HalVersion version>
1950hidl_vec<VersionedOperandPerformance<version>> nonExtensionOperandPerformance(
1951 PerformanceInfo perf) {
1952 using OpPerf = VersionedOperandPerformance<version>;
David Gross5dd79af2019-03-18 15:33:53 -07001953
1954 // Note: range presents enumerators in declaration order, not in numerical order.
Colin Crossbd7f9c42019-10-10 22:58:13 +00001955 static constexpr hidl_enum_range<VersionedOperandType<version>> kOperandTypeRange;
David Gross5dd79af2019-03-18 15:33:53 -07001956
Slava Shklyaev6bf64d12020-02-18 11:39:54 +00001957 std::vector<OpPerf> ret;
1958 ret.reserve(kOperandTypeRange.end() - kOperandTypeRange.begin());
1959 for (VersionedOperandType<version> type : kOperandTypeRange) {
1960 if (static_cast<OperandType>(type) != OperandType::SUBGRAPH) {
1961 ret.push_back(OpPerf{type, perf});
1962 }
1963 }
David Gross5dd79af2019-03-18 15:33:53 -07001964 std::sort(ret.begin(), ret.end(),
1965 [](const OpPerf& a, const OpPerf& b) { return a.type < b.type; });
1966
1967 return ret;
1968}
1969
Colin Crossbd7f9c42019-10-10 22:58:13 +00001970template hal::hidl_vec<V1_2::Capabilities::OperandPerformance>
1971nonExtensionOperandPerformance<HalVersion::V1_2>(PerformanceInfo perf);
1972template hal::hidl_vec<V1_3::Capabilities::OperandPerformance>
1973nonExtensionOperandPerformance<HalVersion::V1_3>(PerformanceInfo perf);
1974
1975template <HalVersion version>
1976void update(hal::hidl_vec<VersionedOperandPerformance<version>>* operandPerformance,
1977 VersionedOperandType<version> type, hal::PerformanceInfo perf) {
David Gross5dd79af2019-03-18 15:33:53 -07001978 CHECK(operandPerformance != nullptr);
Colin Crossbd7f9c42019-10-10 22:58:13 +00001979 const auto it =
1980 std::lower_bound(operandPerformance->begin(), operandPerformance->end(), type,
1981 [](const VersionedOperandPerformance<version>& perf,
1982 VersionedOperandType<version> type) { return perf.type < type; });
David Gross5dd79af2019-03-18 15:33:53 -07001983 CHECK(it != operandPerformance->end())
1984 << toString(type) << " not in " << toString(*operandPerformance);
1985 it->info = perf;
1986}
1987
Colin Crossbd7f9c42019-10-10 22:58:13 +00001988void update(hidl_vec<V1_2::Capabilities::OperandPerformance>* operandPerformance,
1989 V1_2::OperandType type, PerformanceInfo perf) {
1990 update<HalVersion::V1_2>(operandPerformance, type, perf);
1991}
1992void update(hidl_vec<V1_3::Capabilities::OperandPerformance>* operandPerformance,
1993 V1_3::OperandType type, PerformanceInfo perf) {
1994 update<HalVersion::V1_3>(operandPerformance, type, perf);
1995}
1996
// Returns the PerformanceInfo recorded for `type` in a type-sorted
// operandPerformance table, or kNoPerformanceInfo (FLT_MAX fields, declared
// above) with a warning when the search runs off the end.
// NOTE(review): lower_bound returns the first entry not less than `type`
// without checking it->type == type, so a missing type whose successor exists
// would silently return the successor's info — confirm callers only pass
// types present in the table.
template <HalVersion version>
PerformanceInfo lookup(const hidl_vec<VersionedOperandPerformance<version>>& operandPerformance,
                       VersionedOperandType<version> type) {
    // Compare via the canonical OperandType values so that V1_2 and V1_3
    // enumerators order consistently.
    const auto it = std::lower_bound(operandPerformance.begin(), operandPerformance.end(), type,
                                     [](const VersionedOperandPerformance<version>& perf,
                                        VersionedOperandType<version> type) {
                                         return static_cast<OperandType>(perf.type) <
                                                static_cast<OperandType>(type);
                                     });
    if (it == operandPerformance.end()) {
        LOG(WARNING) << "No PerformanceInfo for " << toString(type);
        return kNoPerformanceInfo;
    } else {
        return it->info;
    }
}

// Non-template convenience wrappers for the two supported HAL versions.
PerformanceInfo lookup(const hidl_vec<V1_2::Capabilities::OperandPerformance>& operandPerformance,
                       V1_2::OperandType type) {
    return lookup<HalVersion::V1_2>(operandPerformance, type);
}
PerformanceInfo lookup(const hidl_vec<V1_3::Capabilities::OperandPerformance>& operandPerformance,
                       V1_3::OperandType type) {
    // SUBGRAPH performance is not kept in this table.
    CHECK(type != V1_3::OperandType::SUBGRAPH)
            << "Use Capabilities::ifPerformance or Capabilities::whilePerformance";
    return lookup<HalVersion::V1_3>(operandPerformance, type);
}
2024
Michael Butler75886e72018-01-23 11:05:43 -08002025// Versioning
2026
David Gross5dd79af2019-03-18 15:33:53 -07002027// In Android P, most data types are treated as having the same performance as TENSOR_QUANT8_ASYMM.
2028// This array must be in sorted order.
2029static const OperandType kQuantized8PerformanceConsistentWithP[] = {
2030 OperandType::INT32, OperandType::UINT32, OperandType::TENSOR_INT32, OperandType::OEM,
2031 OperandType::TENSOR_OEM_BYTE};
2032
2033static bool isQuantized8PerformanceConsistentWithP(const V1_2::Capabilities& capabilities) {
2034 const PerformanceInfo quantized8Performance =
Colin Crossbd7f9c42019-10-10 22:58:13 +00002035 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM);
2036 return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
2037 std::end(kQuantized8PerformanceConsistentWithP),
2038 [quantized8Performance, &capabilities](OperandType type) {
2039 return quantized8Performance ==
2040 lookup(capabilities.operandPerformance,
2041 static_cast<V1_2::OperandType>(type));
2042 });
2043}
2044
2045static bool isQuantized8PerformanceConsistentWithP(const V1_3::Capabilities& capabilities) {
2046 const PerformanceInfo quantized8Performance =
David Gross5dd79af2019-03-18 15:33:53 -07002047 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM);
2048 return std::all_of(std::begin(kQuantized8PerformanceConsistentWithP),
2049 std::end(kQuantized8PerformanceConsistentWithP),
2050 [quantized8Performance, &capabilities](OperandType type) {
2051 return quantized8Performance ==
2052 lookup(capabilities.operandPerformance, type);
2053 });
2054}
2055
2056static hidl_vec<V1_2::Capabilities::OperandPerformance> makeQuantized8PerformanceConsistentWithP(
2057 PerformanceInfo quantized8Performance) {
2058 hidl_vec<V1_2::Capabilities::OperandPerformance> ret(
Colin Crossbd7f9c42019-10-10 22:58:13 +00002059 std::size(kQuantized8PerformanceConsistentWithP));
David Gross5dd79af2019-03-18 15:33:53 -07002060 std::transform(
2061 std::begin(kQuantized8PerformanceConsistentWithP),
2062 std::end(kQuantized8PerformanceConsistentWithP), ret.begin(),
2063 [quantized8Performance](OperandType type) -> V1_2::Capabilities::OperandPerformance {
Colin Crossbd7f9c42019-10-10 22:58:13 +00002064 return {static_cast<V1_2::OperandType>(type), quantized8Performance};
David Gross5dd79af2019-03-18 15:33:53 -07002065 });
2066 return ret;
2067}
2068
// A V1_0 description is trivially expressible in V1_0.
bool compliantWithV1_0(const V1_0::Capabilities&) {
    return true;
}

// A V1_1 description fits in V1_0 only if the relaxed-float performance adds
// no information beyond the float32 performance.
bool compliantWithV1_0(const V1_1::Capabilities& capabilities) {
    return capabilities.relaxedFloat32toFloat16Performance == capabilities.float32Performance;
}
2076
2077bool compliantWithV1_0(const V1_2::Capabilities& capabilities) {
2078 const PerformanceInfo perfTensorFloat32 =
Colin Crossbd7f9c42019-10-10 22:58:13 +00002079 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32);
2080 const PerformanceInfo perfFloat32 =
2081 lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32);
2082 if (perfTensorFloat32 != perfFloat32 ||
2083 perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
2084 perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
2085 return false;
2086 }
2087
2088 return isQuantized8PerformanceConsistentWithP(capabilities);
2089}
2090
2091bool compliantWithV1_0(const V1_3::Capabilities& capabilities) {
2092 const PerformanceInfo perfTensorFloat32 =
David Gross5dd79af2019-03-18 15:33:53 -07002093 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32);
2094 const PerformanceInfo perfFloat32 =
2095 lookup(capabilities.operandPerformance, OperandType::FLOAT32);
2096 if (perfTensorFloat32 != perfFloat32 ||
2097 perfTensorFloat32 != capabilities.relaxedFloat32toFloat16PerformanceTensor ||
2098 perfFloat32 != capabilities.relaxedFloat32toFloat16PerformanceScalar) {
2099 return false;
2100 }
2101
2102 return isQuantized8PerformanceConsistentWithP(capabilities);
David Gross47b59162018-02-22 15:05:01 -08002103}
2104
// Older capability structs always fit in the newer V1_1 representation.
bool compliantWithV1_1(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_1(const V1_1::Capabilities&) {
    return true;
}
2112
David Gross5dd79af2019-03-18 15:33:53 -07002113bool compliantWithV1_1(const V1_2::Capabilities& capabilities) {
2114 if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
2115 capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
Colin Crossbd7f9c42019-10-10 22:58:13 +00002116 (lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32) !=
2117 lookup(capabilities.operandPerformance, V1_2::OperandType::FLOAT32))) {
2118 return false;
2119 }
2120
2121 return isQuantized8PerformanceConsistentWithP(capabilities);
2122}
2123
2124bool compliantWithV1_1(const V1_3::Capabilities& capabilities) {
2125 if ((capabilities.relaxedFloat32toFloat16PerformanceTensor !=
2126 capabilities.relaxedFloat32toFloat16PerformanceScalar) ||
David Gross5dd79af2019-03-18 15:33:53 -07002127 (lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32) !=
2128 lookup(capabilities.operandPerformance, OperandType::FLOAT32))) {
2129 return false;
2130 }
2131
2132 return isQuantized8PerformanceConsistentWithP(capabilities);
2133}
2134
// Any capabilities struct from HAL <= 1.2 (resp. <= 1.3) can be expressed in
// the V1_2 (resp. V1_3) representation, so all of these are trivially true.
bool compliantWithV1_2(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_2::Capabilities&) {
    return true;
}

bool compliantWithV1_2(const V1_3::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_0::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_1::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_2::Capabilities&) {
    return true;
}

bool compliantWithV1_3(const V1_3::Capabilities&) {
    return true;
}
2166
Michael Butlerf690d312019-12-12 16:25:03 -08002167V1_0::ErrorStatus convertToV1_0(V1_0::ErrorStatus status) {
2168 return status;
2169}
2170
2171V1_0::ErrorStatus convertToV1_0(V1_3::ErrorStatus status) {
2172 switch (status) {
2173 case V1_3::ErrorStatus::NONE:
2174 return V1_0::ErrorStatus::NONE;
2175 case V1_3::ErrorStatus::DEVICE_UNAVAILABLE:
2176 return V1_0::ErrorStatus::DEVICE_UNAVAILABLE;
2177 case V1_3::ErrorStatus::GENERAL_FAILURE:
2178 return V1_0::ErrorStatus::GENERAL_FAILURE;
2179 case V1_3::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
2180 return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
2181 case V1_3::ErrorStatus::INVALID_ARGUMENT:
2182 return V1_0::ErrorStatus::INVALID_ARGUMENT;
2183 case V1_3::ErrorStatus::MISSED_DEADLINE_TRANSIENT:
2184 return V1_0::ErrorStatus::GENERAL_FAILURE;
2185 case V1_3::ErrorStatus::MISSED_DEADLINE_PERSISTENT:
2186 return V1_0::ErrorStatus::GENERAL_FAILURE;
2187 case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_TRANSIENT:
2188 return V1_0::ErrorStatus::GENERAL_FAILURE;
2189 case V1_3::ErrorStatus::RESOURCE_EXHAUSTED_PERSISTENT:
2190 return V1_0::ErrorStatus::GENERAL_FAILURE;
2191 }
2192 LOG(ERROR) << "Unknown ErrorStatus: " << toString(status) << " mapped to GENERAL_FAILURE";
2193 return V1_0::ErrorStatus::GENERAL_FAILURE;
2194}
2195
// Widens a V1_0 status; the plain static_cast assumes V1_0 error codes share
// their numeric values with V1_3.
V1_3::ErrorStatus convertToV1_3(V1_0::ErrorStatus status) {
    return static_cast<V1_3::ErrorStatus>(status);
}

// Identity conversion.
V1_3::ErrorStatus convertToV1_3(V1_3::ErrorStatus status) {
    return status;
}
2203
Slava Shklyaeva5055742018-10-15 14:58:25 +01002204static V1_0::OperationType uncheckedConvertToV1_0(V1_1::OperationType type) {
Michael Butler75886e72018-01-23 11:05:43 -08002205 return static_cast<V1_0::OperationType>(type);
2206}
2207
Lev Proleev6287d1e2019-10-31 18:12:27 +00002208static V1_0::OperationType uncheckedConvertToV1_0(V1_2::OperationType type) {
2209 return static_cast<V1_0::OperationType>(type);
2210}
2211
2212V1_0::OperationType uncheckedConvertToV1_0(V1_3::OperationType type) {
2213 return static_cast<V1_0::OperationType>(type);
2214}
2215
Slava Shklyaeva5055742018-10-15 14:58:25 +01002216static V1_1::OperationType convertToV1_1(V1_0::OperationType type) {
Michael Butler75886e72018-01-23 11:05:43 -08002217 return static_cast<V1_1::OperationType>(type);
2218}
2219
Lev Proleev6287d1e2019-10-31 18:12:27 +00002220static V1_1::OperationType uncheckedConvertToV1_1(V1_2::OperationType type) {
2221 return static_cast<V1_1::OperationType>(type);
2222}
2223
2224V1_1::OperationType uncheckedConvertToV1_1(V1_3::OperationType type) {
2225 return static_cast<V1_1::OperationType>(type);
2226}
2227
2228static V1_2::OperationType convertToV1_2(V1_0::OperationType type) {
2229 return static_cast<V1_2::OperationType>(type);
2230}
2231
2232static V1_2::OperationType convertToV1_2(V1_1::OperationType type) {
2233 return static_cast<V1_2::OperationType>(type);
2234}
2235
2236V1_2::OperationType uncheckedConvertToV1_2(V1_3::OperationType type) {
2237 return static_cast<V1_2::OperationType>(type);
2238}
2239
2240static V1_3::OperationType convertToV1_3(V1_0::OperationType type) {
2241 return static_cast<V1_3::OperationType>(type);
2242}
2243
2244static V1_3::OperationType convertToV1_3(V1_1::OperationType type) {
2245 return static_cast<V1_3::OperationType>(type);
2246}
2247
2248static V1_3::OperationType convertToV1_3(V1_2::OperationType type) {
2249 return static_cast<V1_3::OperationType>(type);
2250}
2251
David Gross47b59162018-02-22 15:05:01 -08002252V1_0::Capabilities convertToV1_0(const V1_0::Capabilities& capabilities) {
2253 return capabilities;
2254}
2255
2256V1_0::Capabilities convertToV1_0(const V1_1::Capabilities& capabilities) {
2257 if (!compliantWithV1_0(capabilities)) {
2258 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2259 << " from V1_1::Capabilities to V1_0::Capabilities";
2260 }
Michael Butler43953b82019-07-22 18:59:46 -07002261 return {.float32Performance = capabilities.float32Performance,
2262 .quantized8Performance = capabilities.quantized8Performance};
David Gross47b59162018-02-22 15:05:01 -08002263}
2264
David Gross5dd79af2019-03-18 15:33:53 -07002265V1_0::Capabilities convertToV1_0(const V1_2::Capabilities& capabilities) {
2266 if (!compliantWithV1_0(capabilities)) {
2267 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2268 << " from V1_2::Capabilities to V1_0::Capabilities";
2269 }
2270 return {.float32Performance =
Colin Crossbd7f9c42019-10-10 22:58:13 +00002271 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
2272 .quantized8Performance = lookup(capabilities.operandPerformance,
2273 V1_2::OperandType::TENSOR_QUANT8_ASYMM)};
2274}
2275
2276V1_0::Capabilities convertToV1_0(const V1_3::Capabilities& capabilities) {
2277 if (!compliantWithV1_0(capabilities)) {
2278 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2279 << " from V1_3::Capabilities to V1_0::Capabilities";
2280 }
2281 return {.float32Performance =
David Gross5dd79af2019-03-18 15:33:53 -07002282 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
2283 .quantized8Performance =
2284 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM)};
2285}
2286
David Gross47b59162018-02-22 15:05:01 -08002287V1_1::Capabilities convertToV1_1(const V1_0::Capabilities& capabilities) {
Michael Butler43953b82019-07-22 18:59:46 -07002288 return {.float32Performance = capabilities.float32Performance,
2289 .quantized8Performance = capabilities.quantized8Performance,
2290 .relaxedFloat32toFloat16Performance = capabilities.float32Performance};
David Gross47b59162018-02-22 15:05:01 -08002291}
2292
2293V1_1::Capabilities convertToV1_1(const V1_1::Capabilities& capabilities) {
2294 return capabilities;
2295}
2296
David Gross5dd79af2019-03-18 15:33:53 -07002297V1_1::Capabilities convertToV1_1(const V1_2::Capabilities& capabilities) {
2298 if (!compliantWithV1_1(capabilities)) {
2299 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2300 << " from V1_2::Capabilities to V1_1::Capabilities";
2301 }
2302 return {.float32Performance =
Colin Crossbd7f9c42019-10-10 22:58:13 +00002303 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32),
2304 .quantized8Performance =
2305 lookup(capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM),
2306 .relaxedFloat32toFloat16Performance =
2307 capabilities.relaxedFloat32toFloat16PerformanceTensor};
2308}
2309
2310V1_1::Capabilities convertToV1_1(const V1_3::Capabilities& capabilities) {
2311 if (!compliantWithV1_1(capabilities)) {
2312 LOG(ERROR) << "Upcasting non-compliant capabilities " << toString(capabilities)
2313 << " from V1_3::Capabilities to V1_1::Capabilities";
2314 }
2315 return {.float32Performance =
David Gross5dd79af2019-03-18 15:33:53 -07002316 lookup(capabilities.operandPerformance, OperandType::TENSOR_FLOAT32),
2317 .quantized8Performance =
2318 lookup(capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM),
2319 .relaxedFloat32toFloat16Performance =
2320 capabilities.relaxedFloat32toFloat16PerformanceTensor};
2321}
2322
2323V1_2::Capabilities convertToV1_2(const V1_0::Capabilities& capabilities) {
2324 V1_2::Capabilities ret = {
2325 .relaxedFloat32toFloat16PerformanceScalar = capabilities.float32Performance,
2326 .relaxedFloat32toFloat16PerformanceTensor = capabilities.float32Performance,
2327 .operandPerformance =
2328 makeQuantized8PerformanceConsistentWithP(capabilities.quantized8Performance)};
2329 auto& opPerf = ret.operandPerformance;
2330 opPerf.resize(opPerf.size() + 2);
Colin Crossbd7f9c42019-10-10 22:58:13 +00002331 opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
2332 capabilities.float32Performance};
2333 opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
David Gross5dd79af2019-03-18 15:33:53 -07002334 using OperandPerformance = V1_2::Capabilities::OperandPerformance;
2335 std::sort(opPerf.begin(), opPerf.end(),
2336 [](const OperandPerformance& a, const OperandPerformance& b) {
2337 return a.type < b.type;
2338 });
2339 return ret;
2340}
2341
2342V1_2::Capabilities convertToV1_2(const V1_1::Capabilities& capabilities) {
2343 V1_2::Capabilities ret = {.relaxedFloat32toFloat16PerformanceScalar =
2344 capabilities.relaxedFloat32toFloat16Performance,
2345 .relaxedFloat32toFloat16PerformanceTensor =
2346 capabilities.relaxedFloat32toFloat16Performance,
2347 .operandPerformance = makeQuantized8PerformanceConsistentWithP(
2348 capabilities.quantized8Performance)};
2349 auto& opPerf = ret.operandPerformance;
2350 opPerf.resize(opPerf.size() + 2);
Colin Crossbd7f9c42019-10-10 22:58:13 +00002351 opPerf[opPerf.size() - 2] = {V1_2::OperandType::TENSOR_FLOAT32,
2352 capabilities.float32Performance};
2353 opPerf[opPerf.size() - 1] = {V1_2::OperandType::FLOAT32, capabilities.float32Performance};
David Gross5dd79af2019-03-18 15:33:53 -07002354 using OperandPerformance = V1_2::Capabilities::OperandPerformance;
2355 std::sort(opPerf.begin(), opPerf.end(),
2356 [](const OperandPerformance& a, const OperandPerformance& b) {
2357 return a.type < b.type;
2358 });
2359 return ret;
2360}
2361
// Identity conversion: the capabilities are already in the V1_2 representation.
V1_2::Capabilities convertToV1_2(const V1_2::Capabilities& capabilities) {
    return capabilities;
}
2365
Colin Crossbd7f9c42019-10-10 22:58:13 +00002366V1_2::Capabilities convertToV1_2(const V1_3::Capabilities& capabilities) {
2367 V1_2::Capabilities ret = {
2368 .relaxedFloat32toFloat16PerformanceScalar =
2369 capabilities.relaxedFloat32toFloat16PerformanceScalar,
2370 .relaxedFloat32toFloat16PerformanceTensor =
2371 capabilities.relaxedFloat32toFloat16PerformanceTensor,
2372 };
2373 const auto& inputOpPerf = capabilities.operandPerformance;
2374 hidl_vec<V1_3::Capabilities::OperandPerformance> opPerfSupported;
2375 opPerfSupported.resize(inputOpPerf.size());
2376 auto last =
2377 std::copy_if(inputOpPerf.begin(), inputOpPerf.end(), opPerfSupported.begin(),
2378 [](V1_3::Capabilities::OperandPerformance opPerf) {
2379 return validOperandType(static_cast<V1_2::OperandType>(opPerf.type));
2380 });
2381 opPerfSupported.resize(std::distance(opPerfSupported.begin(), last));
2382
2383 auto& convertedOpPerf = ret.operandPerformance;
2384 convertedOpPerf.resize(opPerfSupported.size());
2385 std::transform(opPerfSupported.begin(), opPerfSupported.end(), convertedOpPerf.begin(),
2386 [](V1_3::Capabilities::OperandPerformance opPerf) {
2387 return V1_2::Capabilities::OperandPerformance{
2388 static_cast<V1_2::OperandType>(opPerf.type), opPerf.info};
2389 });
2390 return ret;
2391}
2392
// Upgrades V1_0 capabilities to V1_3 by chaining through the V1_2 conversion.
V1_3::Capabilities convertToV1_3(const V1_0::Capabilities& capabilities) {
    return convertToV1_3(convertToV1_2(capabilities));
}
2396
// Upgrades V1_1 capabilities to V1_3 by chaining through the V1_2 conversion.
V1_3::Capabilities convertToV1_3(const V1_1::Capabilities& capabilities) {
    return convertToV1_3(convertToV1_2(capabilities));
}
2400
2401V1_3::Capabilities convertToV1_3(const V1_2::Capabilities& capabilities) {
2402 V1_3::Capabilities ret = {
2403 .relaxedFloat32toFloat16PerformanceScalar =
2404 capabilities.relaxedFloat32toFloat16PerformanceScalar,
2405 .relaxedFloat32toFloat16PerformanceTensor =
2406 capabilities.relaxedFloat32toFloat16PerformanceTensor,
Slava Shklyaev4bdb9822020-01-23 16:32:04 +00002407 .ifPerformance = kNoPerformanceInfo,
2408 .whilePerformance = kNoPerformanceInfo,
Colin Crossbd7f9c42019-10-10 22:58:13 +00002409 };
2410 auto& opPerf = ret.operandPerformance;
2411 opPerf.resize(capabilities.operandPerformance.size());
2412 std::transform(capabilities.operandPerformance.begin(), capabilities.operandPerformance.end(),
2413 opPerf.begin(), [](V1_2::Capabilities::OperandPerformance opPerf) {
2414 return V1_3::Capabilities::OperandPerformance{
2415 static_cast<V1_3::OperandType>(opPerf.type), opPerf.info};
2416 });
2417 return ret;
2418}
2419
// Identity conversion: the capabilities are already in the V1_3 representation.
V1_3::Capabilities convertToV1_3(const V1_3::Capabilities& capabilities) {
    return capabilities;
}
2423
Slava Shklyaeva5055742018-10-15 14:58:25 +01002424static V1_0::Operation uncheckedConvertToV1_0(const V1_1::Operation& operation) {
2425 return {.type = uncheckedConvertToV1_0(operation.type),
Michael Butler75886e72018-01-23 11:05:43 -08002426 .inputs = operation.inputs,
2427 .outputs = operation.outputs};
2428}
2429
Slava Shklyaeva5055742018-10-15 14:58:25 +01002430static V1_1::Operation convertToV1_1(const V1_0::Operation& operation) {
Michael Butler75886e72018-01-23 11:05:43 -08002431 return {.type = convertToV1_1(operation.type),
2432 .inputs = operation.inputs,
2433 .outputs = operation.outputs};
2434}
2435
Slava Shklyaeva5055742018-10-15 14:58:25 +01002436static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2437 const hidl_vec<V1_1::Operation>& operations) {
Michael Butler75886e72018-01-23 11:05:43 -08002438 hidl_vec<V1_0::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002439 std::transform(
2440 operations.begin(), operations.end(), result.begin(),
2441 [](const V1_1::Operation& operation) { return uncheckedConvertToV1_0(operation); });
Michael Butler75886e72018-01-23 11:05:43 -08002442 return result;
2443}
2444
2445static hidl_vec<V1_1::Operation> convertToV1_1(const hidl_vec<V1_0::Operation>& operations) {
2446 hidl_vec<V1_1::Operation> result(operations.size());
2447 std::transform(operations.begin(), operations.end(), result.begin(),
2448 [](const V1_0::Operation& operation) { return convertToV1_1(operation); });
2449 return result;
2450}
2451
Colin Crossbd7f9c42019-10-10 22:58:13 +00002452bool compliantWithV1_0(const V1_3::Operand& operand) {
Xusong Wangc5978942019-05-08 18:40:42 -07002453 return validOperandType(static_cast<V1_0::OperandType>(operand.type)) &&
2454 (nonExtensionOperandTypeIsScalar(static_cast<int>(operand.type)) ||
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002455 operand.dimensions.size() != 0) &&
2456 compliantWithV1_0(operand.lifetime);
Xusong Wangc5978942019-05-08 18:40:42 -07002457}
2458
Colin Crossbd7f9c42019-10-10 22:58:13 +00002459bool compliantWithV1_2(const V1_3::Operand& operand) {
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002460 return validOperandType(static_cast<V1_2::OperandType>(operand.type)) &&
2461 compliantWithV1_0(operand.lifetime);
Michael Butler75886e72018-01-23 11:05:43 -08002462}
2463
// A V1_3 operand is trivially compliant with the V1_3 HAL.
bool compliantWithV1_3(const V1_3::Operand& operand) {
    return true;
}
2467
// Returns true if the main subgraph of `model` is fully expressible at HAL
// `version`: every memory pool validates, every operand's type/lifetime is
// representable, and every operation (and the operands it touches) validates.
// If `noncompliantOperations` is non-null it must be empty on entry; the index
// of every failing operation is inserted into it (the walk does not stop at
// the first failure), and the function returns whether the set stayed empty.
// NOTE(review): only model.main is inspected here — presumably referenced
// subgraphs are excluded via the SUBGRAPH-lifetime operand check; confirm.
static bool compliantWith(HalVersion version, const V1_3::Model& model,
                          std::set<uint32_t>* noncompliantOperations) {
    // A boolean vector indicating whether each pool is compliant with the target HAL version.
    std::vector<bool> isPoolCompliant(model.pools.size(), false);
    std::transform(model.pools.begin(), model.pools.end(), isPoolCompliant.begin(),
                   [version](const hidl_memory& pool) { return validatePool(pool, version); });

    // A boolean vector indicating whether each operand is compliant with the target HAL version.
    std::vector<bool> isOperandCompliant(model.main.operands.size(), false);
    std::transform(model.main.operands.begin(), model.main.operands.end(),
                   isOperandCompliant.begin(), [&isPoolCompliant, version](const Operand& op) {
                       bool is_operand_compliant = false;
                       switch (version) {
                           case HalVersion::UNKNOWN:
                               is_operand_compliant = false;
                               break;
                           case HalVersion::V1_0:
                               is_operand_compliant = compliantWithV1_0(op);
                               break;
                           case HalVersion::V1_1:
                               // There is no V1_1::Operand -- both V1_0::Model
                               // and V1_1::Model use V1_0::Operand.
                               is_operand_compliant = compliantWithV1_0(op);
                               break;
                           case HalVersion::V1_2:
                               is_operand_compliant = compliantWithV1_2(op);
                               break;
                           case HalVersion::V1_3:
                               is_operand_compliant = compliantWithV1_3(op);
                               break;
                       }
                       // A constant operand backed by a non-compliant pool is
                       // itself non-compliant.
                       return is_operand_compliant &&
                              !(op.lifetime == OperandLifeTime::CONSTANT_REFERENCE &&
                                !isPoolCompliant[op.location.poolIndex]);
                   });

    // True iff every operand index in `indices` refers to a compliant operand.
    auto allOperandsCompliant = [&isOperandCompliant](const hidl_vec<uint32_t>& indices) {
        return std::all_of(
                indices.begin(), indices.end(),
                [&isOperandCompliant](const uint32_t ind) { return isOperandCompliant[ind]; });
    };

    // An operation is compliant iff all its operands are and the operation
    // itself validates at the target version.
    auto localValidateOperation = [&model, version, &allOperandsCompliant](const Operation& op) {
        if (!allOperandsCompliant(op.inputs) || !allOperandsCompliant(op.outputs)) return false;
        int error = validateOperation(
                static_cast<int32_t>(op.type), op.inputs.size(),
                op.inputs.size() > 0 ? op.inputs.data() : nullptr, op.outputs.size(),
                op.outputs.size() > 0 ? op.outputs.data() : nullptr, model.main.operands, version);
        return error == ANEURALNETWORKS_NO_ERROR;
    };

    if (noncompliantOperations) {
        CHECK(noncompliantOperations->empty());
        // Collect every failing operation index rather than stopping early.
        for (uint32_t idx = 0; idx < model.main.operations.size(); ++idx) {
            if (!localValidateOperation(model.main.operations[idx])) {
                noncompliantOperations->insert(idx);
            }
        }
        return noncompliantOperations->empty();
    } else {
        return std::all_of(model.main.operations.begin(), model.main.operations.end(),
                           localValidateOperation);
    }
}
2532
// A V1_0::Model is trivially compliant with the V1_0 HAL.
bool compliantWithV1_0(const V1_0::Model& model) {
    return true;
}
2536
2537bool compliantWithV1_0(const V1_1::Model& model) {
2538 // In addition to new enumeration values being introduced in V1_1::Model, a
2539 // new flag was introduced to indicate whether or not float32 data can be
2540 // calculated using float16 units. This 'relaxComputationFloat32toFloat16'
2541 // flag is not relevant in whether a V1_1::Model is compliant with a
2542 // V1_0::Model because all 1.0 drivers require strict calculation by default
2543 // in the P NN runtime. Even if fp16 calculations are allowed, they can
2544 // still be computed by a strict fp32 driver.
2545 return std::all_of(
2546 model.operations.begin(), model.operations.end(), [&model](const V1_1::Operation& op) {
2547 int error = validateOperation(static_cast<int32_t>(op.type), op.inputs.size(),
2548 op.inputs.size() > 0 ? op.inputs.data() : nullptr,
2549 op.outputs.size(),
2550 op.outputs.size() > 0 ? op.outputs.data() : nullptr,
2551 convertToV1_3(model.operands), HalVersion::V1_0);
2552 return error == ANEURALNETWORKS_NO_ERROR;
2553 });
2554}
2555
// Checks V1_0 compliance of a V1_2 model by widening it to V1_3 first.
bool compliantWithV1_0(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_0, convertToV1_3(model), noncompliantOperations);
}
2559
// Checks V1_0 compliance of a V1_3 model's main subgraph.
bool compliantWithV1_0(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_0, model, noncompliantOperations);
}
2563
// A V1_0::Model is trivially compliant with the newer V1_1 HAL.
bool compliantWithV1_1(const V1_0::Model&) {
    return true;
}
2567
// A V1_1::Model is trivially compliant with the V1_1 HAL.
bool compliantWithV1_1(const V1_1::Model&) {
    return true;
}
2571
// Checks V1_1 compliance of a V1_2 model by widening it to V1_3 first.
bool compliantWithV1_1(const V1_2::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_1, convertToV1_3(model), noncompliantOperations);
}
2575
// Checks V1_1 compliance of a V1_3 model's main subgraph.
bool compliantWithV1_1(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_1, model, noncompliantOperations);
}
2579
// A V1_0::Model is trivially compliant with the newer V1_2 HAL.
bool compliantWithV1_2(const V1_0::Model&) {
    return true;
}
2583
// A V1_1::Model is trivially compliant with the newer V1_2 HAL.
bool compliantWithV1_2(const V1_1::Model&) {
    return true;
}
2587
// A V1_2::Model is trivially compliant with the V1_2 HAL; the out-parameter
// (if any) is intentionally left untouched (i.e. stays empty).
bool compliantWithV1_2(const V1_2::Model&, std::set<uint32_t>* noncompliantOperations) {
    return true;
}
2591
// Checks V1_2 compliance of a V1_3 model's main subgraph.
bool compliantWithV1_2(const V1_3::Model& model, std::set<uint32_t>* noncompliantOperations) {
    return compliantWith(HalVersion::V1_2, model, noncompliantOperations);
}
2595
Slava Shklyaeva5055742018-10-15 14:58:25 +01002596static V1_0::Operation uncheckedConvertToV1_0(const V1_2::Operation& operation) {
2597 return {.type = uncheckedConvertToV1_0(operation.type),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002598 .inputs = operation.inputs,
2599 .outputs = operation.outputs};
2600}
2601
Colin Crossbd7f9c42019-10-10 22:58:13 +00002602static V1_0::Operation uncheckedConvertToV1_0(const V1_3::Operation& operation) {
2603 return {.type = uncheckedConvertToV1_0(operation.type),
2604 .inputs = operation.inputs,
2605 .outputs = operation.outputs};
2606}
2607
Slava Shklyaeva5055742018-10-15 14:58:25 +01002608static V1_1::Operation uncheckedConvertToV1_1(const V1_2::Operation& operation) {
2609 return {.type = uncheckedConvertToV1_1(operation.type),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002610 .inputs = operation.inputs,
2611 .outputs = operation.outputs};
2612}
2613
Colin Crossbd7f9c42019-10-10 22:58:13 +00002614static V1_1::Operation uncheckedConvertToV1_1(const V1_3::Operation& operation) {
2615 return {.type = uncheckedConvertToV1_1(operation.type),
2616 .inputs = operation.inputs,
2617 .outputs = operation.outputs};
2618}
2619
Slava Shklyaeva5055742018-10-15 14:58:25 +01002620static V1_2::Operation convertToV1_2(const V1_0::Operation& operation) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002621 return {.type = convertToV1_2(operation.type),
2622 .inputs = operation.inputs,
2623 .outputs = operation.outputs};
2624}
2625
Slava Shklyaeva5055742018-10-15 14:58:25 +01002626static V1_2::Operation convertToV1_2(const V1_1::Operation& operation) {
2627 return {.type = convertToV1_2(operation.type),
2628 .inputs = operation.inputs,
2629 .outputs = operation.outputs};
2630}
2631
Colin Crossbd7f9c42019-10-10 22:58:13 +00002632static V1_2::Operation uncheckedConvertToV1_2(const V1_3::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002633 return {.type = uncheckedConvertToV1_2(operation.type),
2634 .inputs = operation.inputs,
2635 .outputs = operation.outputs};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002636}
2637
2638static V1_3::Operation convertToV1_3(const V1_0::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002639 return {.type = convertToV1_3(operation.type),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002640 .inputs = operation.inputs,
2641 .outputs = operation.outputs};
2642}
2643
2644static V1_3::Operation convertToV1_3(const V1_1::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002645 return {.type = convertToV1_3(operation.type),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002646 .inputs = operation.inputs,
2647 .outputs = operation.outputs};
2648}
2649
2650static V1_3::Operation convertToV1_3(const V1_2::Operation& operation) {
Lev Proleev6287d1e2019-10-31 18:12:27 +00002651 return {.type = convertToV1_3(operation.type),
2652 .inputs = operation.inputs,
2653 .outputs = operation.outputs};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002654}
2655
2656static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2657 const hidl_vec<V1_3::Operation>& operations) {
2658 hidl_vec<V1_0::Operation> result(operations.size());
2659 std::transform(
2660 operations.begin(), operations.end(), result.begin(),
2661 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_0(operation); });
2662 return result;
2663}
2664
Slava Shklyaeva5055742018-10-15 14:58:25 +01002665static hidl_vec<V1_0::Operation> uncheckedConvertToV1_0(
2666 const hidl_vec<V1_2::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002667 hidl_vec<V1_0::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002668 std::transform(
2669 operations.begin(), operations.end(), result.begin(),
2670 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_0(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002671 return result;
2672}
2673
Colin Crossbd7f9c42019-10-10 22:58:13 +00002674static hidl_vec<V1_2::Operation> uncheckedConvertToV1_2(
2675 const hidl_vec<V1_3::Operation>& operations) {
2676 hidl_vec<V1_2::Operation> result(operations.size());
2677 std::transform(
2678 operations.begin(), operations.end(), result.begin(),
2679 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_2(operation); });
2680 return result;
2681}
2682
Slava Shklyaeva5055742018-10-15 14:58:25 +01002683static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
2684 const hidl_vec<V1_2::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002685 hidl_vec<V1_1::Operation> result(operations.size());
Slava Shklyaeva5055742018-10-15 14:58:25 +01002686 std::transform(
2687 operations.begin(), operations.end(), result.begin(),
2688 [](const V1_2::Operation& operation) { return uncheckedConvertToV1_1(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002689 return result;
2690}
2691
Colin Crossbd7f9c42019-10-10 22:58:13 +00002692static hidl_vec<V1_1::Operation> uncheckedConvertToV1_1(
2693 const hidl_vec<V1_3::Operation>& operations) {
2694 hidl_vec<V1_1::Operation> result(operations.size());
2695 std::transform(
2696 operations.begin(), operations.end(), result.begin(),
2697 [](const V1_3::Operation& operation) { return uncheckedConvertToV1_1(operation); });
2698 return result;
2699}
2700
Slava Shklyaeva5055742018-10-15 14:58:25 +01002701static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_0::Operation>& operations) {
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002702 hidl_vec<V1_2::Operation> result(operations.size());
2703 std::transform(operations.begin(), operations.end(), result.begin(),
Slava Shklyaeva5055742018-10-15 14:58:25 +01002704 [](const V1_0::Operation& operation) { return convertToV1_2(operation); });
2705 return result;
2706}
2707
2708static hidl_vec<V1_2::Operation> convertToV1_2(const hidl_vec<V1_1::Operation>& operations) {
2709 hidl_vec<V1_2::Operation> result(operations.size());
2710 std::transform(operations.begin(), operations.end(), result.begin(),
2711 [](const V1_1::Operation& operation) { return convertToV1_2(operation); });
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002712 return result;
2713}
2714
Colin Crossbd7f9c42019-10-10 22:58:13 +00002715static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_0::Operation>& operations) {
2716 hidl_vec<V1_3::Operation> result(operations.size());
2717 std::transform(operations.begin(), operations.end(), result.begin(),
2718 [](const V1_0::Operation& operation) { return convertToV1_3(operation); });
2719 return result;
2720}
2721
2722static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_1::Operation>& operations) {
2723 hidl_vec<V1_3::Operation> result(operations.size());
2724 std::transform(operations.begin(), operations.end(), result.begin(),
2725 [](const V1_1::Operation& operation) { return convertToV1_3(operation); });
2726 return result;
2727}
2728
2729static hidl_vec<V1_3::Operation> convertToV1_3(const hidl_vec<V1_2::Operation>& operations) {
2730 hidl_vec<V1_3::Operation> result(operations.size());
2731 std::transform(operations.begin(), operations.end(), result.begin(),
2732 [](const V1_2::Operation& operation) { return convertToV1_3(operation); });
2733 return result;
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002734}
2735
// A V1_2 operand type is V1_0-compliant iff its value is a valid V1_0 type.
static bool compliantWithV1_0(const V1_2::OperandType& operandType) {
    return validOperandType(static_cast<V1_0::OperandType>(operandType));
}
2739
// A V1_3 operand type is V1_0-compliant iff its value is a valid V1_0 type.
static bool compliantWithV1_0(const V1_3::OperandType& operandType) {
    return validOperandType(static_cast<V1_0::OperandType>(operandType));
}
2743
// A V1_3 operand type is V1_2-compliant iff its value is a valid V1_2 type.
static bool compliantWithV1_2(const V1_3::OperandType& operandType) {
    return validOperandType(static_cast<V1_2::OperandType>(operandType));
}
2747
// Narrows a V1_2 operand type to V1_0, logging (but still converting) if the
// value has no V1_0 equivalent.
V1_0::OperandType convertToV1_0(const V1_2::OperandType& operandType) {
    if (!compliantWithV1_0(operandType)) {
        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
                   << " from V1_2::OperandType to V1_0::OperandType";
    }
    return static_cast<V1_0::OperandType>(operandType);
}
2755
// Widens a V1_0 operand type to V1_2 (always safe; values are a superset).
V1_2::OperandType convertToV1_2(const V1_0::OperandType& operandType) {
    return static_cast<V1_2::OperandType>(operandType);
}
2759
// Narrows a V1_3 operand type to V1_2, logging (but still converting) if the
// value has no V1_2 equivalent.
V1_2::OperandType convertToV1_2(const V1_3::OperandType& operandType) {
    if (!compliantWithV1_2(operandType)) {
        LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
                   << " from V1_3::OperandType to V1_2::OperandType";
    }
    return static_cast<V1_2::OperandType>(operandType);
}
2767
2768V1_0::OperandType convertToV1_0(const V1_3::OperandType& operandType) {
2769 if (!compliantWithV1_0(operandType)) {
2770 LOG(ERROR) << "Upcasting non-compliant operand type " << toString(operandType)
2771 << " from V1_3::Operand to V1_0::Operand";
2772 }
2773 return static_cast<V1_0::OperandType>(operandType);
2774}
2775
// Every V1_0 lifetime value is trivially V1_0-compliant.
bool compliantWithV1_0(hal::V1_0::OperandLifeTime lifetime) {
    return true;
}
2779
// SUBGRAPH is the only lifetime introduced in V1_3, so anything else maps to V1_0.
bool compliantWithV1_0(hal::V1_3::OperandLifeTime lifetime) {
    return lifetime != V1_3::OperandLifeTime::SUBGRAPH;
}
2783
// Every V1_0 lifetime value is trivially V1_3-compliant.
bool compliantWithV1_3(hal::V1_0::OperandLifeTime lifetime) {
    return true;
}
2787
// Every V1_3 lifetime value is trivially V1_3-compliant.
bool compliantWithV1_3(hal::V1_3::OperandLifeTime lifetime) {
    return true;
}
2791
// Identity conversion: the lifetime is already in the V1_0 representation.
V1_0::OperandLifeTime convertToV1_0(V1_0::OperandLifeTime lifetime) {
    return lifetime;
}
2795
// Narrows a V1_3 lifetime to V1_0, logging (but still converting) if the value
// (SUBGRAPH) has no V1_0 equivalent.
V1_0::OperandLifeTime convertToV1_0(V1_3::OperandLifeTime lifetime) {
    if (!compliantWithV1_0(lifetime)) {
        LOG(ERROR) << "Upcasting non-compliant lifetime " << toString(lifetime)
                   << " from V1_3 to V1_0";
    }
    return static_cast<V1_0::OperandLifeTime>(lifetime);
}
2803
// Widens a V1_0 lifetime to V1_3 (always safe; values are a superset).
V1_3::OperandLifeTime convertToV1_3(V1_0::OperandLifeTime lifetime) {
    return static_cast<V1_3::OperandLifeTime>(lifetime);
}
2807
// Identity conversion: the lifetime is already in the V1_3 representation.
V1_3::OperandLifeTime convertToV1_3(V1_3::OperandLifeTime lifetime) {
    return lifetime;
}
2811
Colin Crossbd7f9c42019-10-10 22:58:13 +00002812V1_0::Operand convertToV1_0(const V1_2::Operand& operand) {
2813 return {.type = convertToV1_0(operand.type),
2814 .dimensions = operand.dimensions,
2815 .numberOfConsumers = operand.numberOfConsumers,
2816 .scale = operand.scale,
2817 .zeroPoint = operand.zeroPoint,
2818 .lifetime = convertToV1_0(operand.lifetime),
2819 .location = operand.location};
2820}
2821
2822V1_0::Operand convertToV1_0(const V1_3::Operand& operand) {
2823 return {.type = convertToV1_0(operand.type),
2824 .dimensions = operand.dimensions,
2825 .numberOfConsumers = operand.numberOfConsumers,
2826 .scale = operand.scale,
2827 .zeroPoint = operand.zeroPoint,
2828 .lifetime = convertToV1_0(operand.lifetime),
2829 .location = operand.location};
2830}
2831
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002832V1_2::Operand convertToV1_2(const V1_0::Operand& operand) {
2833 return {.type = convertToV1_2(operand.type),
2834 .dimensions = operand.dimensions,
2835 .numberOfConsumers = operand.numberOfConsumers,
2836 .scale = operand.scale,
2837 .zeroPoint = operand.zeroPoint,
2838 .lifetime = operand.lifetime,
2839 .location = operand.location};
2840}
2841
Colin Crossbd7f9c42019-10-10 22:58:13 +00002842V1_2::Operand convertToV1_2(const V1_3::Operand& operand) {
2843 return {.type = convertToV1_2(operand.type),
2844 .dimensions = operand.dimensions,
2845 .numberOfConsumers = operand.numberOfConsumers,
2846 .scale = operand.scale,
2847 .zeroPoint = operand.zeroPoint,
2848 .lifetime = static_cast<V1_0::OperandLifeTime>(operand.lifetime),
2849 .location = operand.location,
Michael Butler2d3826e2020-02-04 16:08:11 -08002850 .extraParams = operand.extraParams};
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002851}
2852
Colin Crossbd7f9c42019-10-10 22:58:13 +00002853V1_3::Operand convertToV1_3(const V1_0::Operand& operand) {
2854 return {.type = static_cast<V1_3::OperandType>(operand.type),
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002855 .dimensions = operand.dimensions,
2856 .numberOfConsumers = operand.numberOfConsumers,
2857 .scale = operand.scale,
2858 .zeroPoint = operand.zeroPoint,
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002859 .lifetime = convertToV1_3(operand.lifetime),
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002860 .location = operand.location};
2861}
2862
Colin Crossbd7f9c42019-10-10 22:58:13 +00002863V1_3::Operand convertToV1_3(const V1_2::Operand& operand) {
2864 return {.type = static_cast<V1_3::OperandType>(operand.type),
2865 .dimensions = operand.dimensions,
2866 .numberOfConsumers = operand.numberOfConsumers,
2867 .scale = operand.scale,
2868 .zeroPoint = operand.zeroPoint,
Slava Shklyaevec9bae62019-12-13 16:39:08 +00002869 .lifetime = convertToV1_3(operand.lifetime),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002870 .location = operand.location,
Michael Butler2d3826e2020-02-04 16:08:11 -08002871 .extraParams = operand.extraParams};
Colin Crossbd7f9c42019-10-10 22:58:13 +00002872}
2873
// Identity conversion: the operand is already in the V1_3 representation.
V1_3::Operand convertToV1_3(const V1_3::Operand& operand) {
    return operand;
}
2877
// Identity conversion: the operand list is already in the V1_0 representation.
hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_0::Operand>& operands) {
    return operands;
}
2881
2882hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_2::Operand>& operands) {
2883 hidl_vec<V1_0::Operand> result(operands.size());
2884 std::transform(operands.begin(), operands.end(), result.begin(),
2885 [](const V1_2::Operand& operand) { return convertToV1_0(operand); });
2886 return result;
2887}
2888
2889hidl_vec<V1_0::Operand> convertToV1_0(const hidl_vec<V1_3::Operand>& operands) {
2890 hidl_vec<V1_0::Operand> result(operands.size());
2891 std::transform(operands.begin(), operands.end(), result.begin(),
2892 [](const V1_3::Operand& operand) { return convertToV1_0(operand); });
2893 return result;
2894}
2895
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002896hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_0::Operand>& operands) {
2897 hidl_vec<V1_2::Operand> result(operands.size());
2898 std::transform(operands.begin(), operands.end(), result.begin(),
2899 [](const V1_0::Operand& operand) { return convertToV1_2(operand); });
2900 return result;
2901}
2902
// Identity conversion: the operand list is already in the V1_2 representation.
hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_2::Operand>& operands) {
    return operands;
}
2906
Colin Crossbd7f9c42019-10-10 22:58:13 +00002907hidl_vec<V1_2::Operand> convertToV1_2(const hidl_vec<V1_3::Operand>& operands) {
2908 hidl_vec<V1_2::Operand> result(operands.size());
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002909 std::transform(operands.begin(), operands.end(), result.begin(),
Colin Crossbd7f9c42019-10-10 22:58:13 +00002910 [](const V1_3::Operand& operand) { return convertToV1_2(operand); });
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002911 return result;
2912}
2913
Colin Crossbd7f9c42019-10-10 22:58:13 +00002914hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_0::Operand>& operands) {
2915 hidl_vec<V1_3::Operand> result(operands.size());
2916 std::transform(operands.begin(), operands.end(), result.begin(),
2917 [](const V1_0::Operand& operand) { return convertToV1_3(operand); });
2918 return result;
2919}
2920
2921hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_2::Operand>& operands) {
2922 hidl_vec<V1_3::Operand> result(operands.size());
2923 std::transform(operands.begin(), operands.end(), result.begin(),
2924 [](const V1_2::Operand& operand) { return convertToV1_3(operand); });
2925 return result;
2926}
2927
// Identity conversion: the operand list is already in the V1_3 representation.
hidl_vec<V1_3::Operand> convertToV1_3(const hidl_vec<V1_3::Operand>& operands) {
    return operands;
}
2931
// Identity conversion: the model is already in the V1_0 representation.
V1_0::Model convertToV1_0(const V1_0::Model& model) {
    return model;
}
2935
2936V1_0::Model convertToV1_0(const V1_1::Model& model) {
2937 if (!compliantWithV1_0(model)) {
2938 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2939 << " from V1_1::Model to V1_0::Model";
2940 }
2941 return {.operands = model.operands,
2942 .operations = uncheckedConvertToV1_0(model.operations),
2943 .inputIndexes = model.inputIndexes,
2944 .outputIndexes = model.outputIndexes,
2945 .operandValues = model.operandValues,
2946 .pools = model.pools};
2947}
2948
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002949V1_0::Model convertToV1_0(const V1_2::Model& model) {
2950 if (!compliantWithV1_0(model)) {
2951 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
Slava Shklyaeva5055742018-10-15 14:58:25 +01002952 << " from V1_2::Model to V1_0::Model";
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002953 }
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002954 return {.operands = convertToV1_0(model.operands),
Slava Shklyaeva5055742018-10-15 14:58:25 +01002955 .operations = uncheckedConvertToV1_0(model.operations),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002956 .inputIndexes = model.inputIndexes,
2957 .outputIndexes = model.outputIndexes,
2958 .operandValues = model.operandValues,
2959 .pools = model.pools};
2960}
2961
Colin Crossbd7f9c42019-10-10 22:58:13 +00002962V1_0::Model convertToV1_0(const V1_3::Model& model) {
2963 if (!compliantWithV1_0(model)) {
2964 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2965 << " from V1_3::Model to V1_0::Model";
2966 }
Slava Shklyaev8de7a222019-12-13 18:05:41 +00002967 return {.operands = convertToV1_0(model.main.operands),
2968 .operations = uncheckedConvertToV1_0(model.main.operations),
2969 .inputIndexes = model.main.inputIndexes,
2970 .outputIndexes = model.main.outputIndexes,
Colin Crossbd7f9c42019-10-10 22:58:13 +00002971 .operandValues = model.operandValues,
2972 .pools = model.pools};
2973}
2974
2975V1_1::Model convertToV1_1(const V1_0::Model& model) {
2976 return {.operands = model.operands,
2977 .operations = convertToV1_1(model.operations),
2978 .inputIndexes = model.inputIndexes,
2979 .outputIndexes = model.outputIndexes,
2980 .operandValues = model.operandValues,
2981 .pools = model.pools,
2982 .relaxComputationFloat32toFloat16 = false};
2983}
2984
// Identity conversion: the model is already a V1_1::Model.
V1_1::Model convertToV1_1(const V1_1::Model& model) {
    return model;
}
2988
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002989V1_1::Model convertToV1_1(const V1_2::Model& model) {
Slava Shklyaeva5055742018-10-15 14:58:25 +01002990 if (!compliantWithV1_1(model)) {
2991 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
2992 << " from V1_2::Model to V1_1::Model";
2993 }
Lev Proleevfeafd0b2018-10-02 14:15:58 +01002994 return {.operands = convertToV1_0(model.operands), // Operands in 1.1 and 1.0 are identical.
Slava Shklyaeva5055742018-10-15 14:58:25 +01002995 .operations = uncheckedConvertToV1_1(model.operations),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01002996 .inputIndexes = model.inputIndexes,
2997 .outputIndexes = model.outputIndexes,
2998 .operandValues = model.operandValues,
2999 .pools = model.pools,
3000 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3001}
3002
Colin Crossbd7f9c42019-10-10 22:58:13 +00003003V1_1::Model convertToV1_1(const V1_3::Model& model) {
3004 if (!compliantWithV1_1(model)) {
3005 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
3006 << " from V1_3::Model to V1_1::Model";
3007 }
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003008 return {// Operands in 1.1 and 1.0 are identical.
3009 .operands = convertToV1_0(model.main.operands),
3010 .operations = uncheckedConvertToV1_1(model.main.operations),
3011 .inputIndexes = model.main.inputIndexes,
3012 .outputIndexes = model.main.outputIndexes,
Colin Crossbd7f9c42019-10-10 22:58:13 +00003013 .operandValues = model.operandValues,
3014 .pools = model.pools,
3015 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3016}
3017
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003018V1_2::Model convertToV1_2(const V1_0::Model& model) {
Lev Proleevfeafd0b2018-10-02 14:15:58 +01003019 return {.operands = convertToV1_2(model.operands),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003020 .operations = convertToV1_2(model.operations),
3021 .inputIndexes = model.inputIndexes,
3022 .outputIndexes = model.outputIndexes,
3023 .operandValues = model.operandValues,
3024 .pools = model.pools,
3025 .relaxComputationFloat32toFloat16 = false};
3026}
3027
3028V1_2::Model convertToV1_2(const V1_1::Model& model) {
Lev Proleevfeafd0b2018-10-02 14:15:58 +01003029 return {.operands = convertToV1_2(model.operands),
Slava Shklyaev4475d8f2018-09-14 13:34:31 +01003030 .operations = convertToV1_2(model.operations),
3031 .inputIndexes = model.inputIndexes,
3032 .outputIndexes = model.outputIndexes,
3033 .operandValues = model.operandValues,
3034 .pools = model.pools,
3035 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3036}
3037
// Identity conversion: the model is already a V1_2::Model.
V1_2::Model convertToV1_2(const V1_2::Model& model) {
    return model;
}
3041
Colin Crossbd7f9c42019-10-10 22:58:13 +00003042V1_2::Model convertToV1_2(const V1_3::Model& model) {
3043 if (!compliantWithV1_2(model)) {
3044 LOG(ERROR) << "Upcasting non-compliant model " << SHOW_IF_DEBUG(toString(model))
3045 << " from V1_3::Model to V1_2::Model";
3046 }
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003047 return {.operands = convertToV1_2(model.main.operands),
3048 .operations = uncheckedConvertToV1_2(model.main.operations),
3049 .inputIndexes = model.main.inputIndexes,
3050 .outputIndexes = model.main.outputIndexes,
Colin Crossbd7f9c42019-10-10 22:58:13 +00003051 .operandValues = model.operandValues,
3052 .pools = model.pools,
3053 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
Slava Shklyaev57941102019-12-13 17:02:29 +00003054 .extensionNameToPrefix = model.extensionNameToPrefix};
Colin Crossbd7f9c42019-10-10 22:58:13 +00003055}
3056
3057V1_3::Model convertToV1_3(const V1_0::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003058 return {.main = {.operands = convertToV1_3(model.operands),
3059 .operations = convertToV1_3(model.operations),
3060 .inputIndexes = model.inputIndexes,
3061 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003062 .operandValues = model.operandValues,
3063 .pools = model.pools,
3064 .relaxComputationFloat32toFloat16 = false};
3065}
3066
3067V1_3::Model convertToV1_3(const V1_1::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003068 return {.main = {.operands = convertToV1_3(model.operands),
3069 .operations = convertToV1_3(model.operations),
3070 .inputIndexes = model.inputIndexes,
3071 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003072 .operandValues = model.operandValues,
3073 .pools = model.pools,
3074 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16};
3075}
3076
3077V1_3::Model convertToV1_3(const V1_2::Model& model) {
Slava Shklyaev8de7a222019-12-13 18:05:41 +00003078 return {.main = {.operands = convertToV1_3(model.operands),
3079 .operations = convertToV1_3(model.operations),
3080 .inputIndexes = model.inputIndexes,
3081 .outputIndexes = model.outputIndexes},
Colin Crossbd7f9c42019-10-10 22:58:13 +00003082 .operandValues = model.operandValues,
3083 .pools = model.pools,
3084 .relaxComputationFloat32toFloat16 = model.relaxComputationFloat32toFloat16,
Slava Shklyaev57941102019-12-13 17:02:29 +00003085 .extensionNameToPrefix = model.extensionNameToPrefix};
Colin Crossbd7f9c42019-10-10 22:58:13 +00003086}
3087
// Identity conversion: the model is already a V1_3::Model.
V1_3::Model convertToV1_3(const V1_3::Model& model) {
    return model;
}
3091
// A V1_0 request is trivially compliant with V1_0.
bool compliantWithV1_0(const V1_0::Request& request) {
    return true;
}
3095
3096bool compliantWithV1_0(const V1_3::Request& request) {
3097 return std::all_of(request.pools.begin(), request.pools.end(), [](const auto& pool) {
3098 return pool.getDiscriminator() == V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory;
3099 });
3100}
3101
3102static hidl_memory convertToV1_0(const V1_3::Request::MemoryPool& pool) {
3103 switch (pool.getDiscriminator()) {
3104 case V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory:
3105 return pool.hidlMemory();
3106 case V1_3::Request::MemoryPool::hidl_discriminator::token:
3107 return hidl_memory{};
3108 }
3109}
3110
3111static V1_3::Request::MemoryPool convertToV1_3(const hidl_memory& pool) {
3112 V1_3::Request::MemoryPool ret;
3113 ret.hidlMemory(pool);
3114 return ret;
3115}
3116
// Identity conversion: the request is already a V1_0::Request.
V1_0::Request convertToV1_0(const V1_0::Request& request) {
    return request;
}
3120
3121V1_0::Request convertToV1_0(const V1_3::Request& request) {
3122 if (!compliantWithV1_0(request)) {
3123 LOG(ERROR) << "Upcasting non-compliant request " << SHOW_IF_DEBUG(toString(request))
3124 << " from V1_3::Request to V1_0::Request";
3125 }
3126 hidl_vec<hidl_memory> pools(request.pools.size());
3127 std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
3128 [](const auto& pool) { return convertToV1_0(pool); });
3129 return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
3130}
3131
3132V1_3::Request convertToV1_3(const V1_0::Request& request) {
3133 hidl_vec<V1_3::Request::MemoryPool> pools(request.pools.size());
3134 std::transform(request.pools.begin(), request.pools.end(), pools.begin(),
3135 [](const auto& pool) { return convertToV1_3(pool); });
3136 return {.inputs = request.inputs, .outputs = request.outputs, .pools = std::move(pools)};
3137}
3138
// Identity conversion: the request is already a V1_3::Request.
V1_3::Request convertToV1_3(const V1_3::Request& request) {
    return request;
}
3142
David Gross0b9453e2017-09-22 17:16:51 -07003143#ifdef NN_DEBUGGABLE
David Grossa2a03632017-10-03 12:49:47 -07003144uint32_t getProp(const char* str, uint32_t defaultValue) {
Miao Wang820215d2017-10-04 19:45:45 -07003145 const std::string propStr = android::base::GetProperty(str, "");
3146 if (propStr.size() > 0) {
3147 return std::stoi(propStr);
David Grossa2a03632017-10-03 12:49:47 -07003148 } else {
3149 return defaultValue;
3150 }
David Gross0b9453e2017-09-22 17:16:51 -07003151}
David Gross0b9453e2017-09-22 17:16:51 -07003152#endif // NN_DEBUGGABLE
3153
Michael Butler43953b82019-07-22 18:59:46 -07003154} // namespace nn
3155} // namespace android